diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e0576d..e373239 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,5 @@ -## Version 4.4.2 - -### Feature -- Added state upgrade from schema version 2 to schema version 3 +## Version 4.5.0-alfa ### Bugfix -- Fixed incorrect work data_source_static_route -- Fixed bug with old field "with_default_vins" in state +- Fixed bservice and rg schema and flatten +- Add stateUpgrader for k8s_cp diff --git a/Makefile b/Makefile index bc1ce73..2c9b820 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ ZIPDIR = ./zip BINARY=${NAME} WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH} MAINPATH = ./cmd/decort/ -VERSION=4.4.2 +VERSION=4.5.0-alfa OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) FILES = ${BINARY}_${VERSION}_darwin_amd64\ diff --git a/go.mod b/go.mod index 6ef1ddf..4aeee0b 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,8 @@ require ( github.com/hashicorp/terraform-plugin-docs v0.13.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 github.com/sirupsen/logrus v1.9.0 - golang.org/x/net v0.15.0 - repository.basistech.ru/BASIS/decort-golang-sdk v1.6.5 + golang.org/x/net v0.16.0 + repository.basistech.ru/BASIS/decort-golang-sdk v1.6.9 ) require ( @@ -25,7 +25,7 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.15.4 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -67,12 +67,13 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/sys v0.12.0 // indirect + 
golang.org/x/crypto v0.14.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/grpc v1.51.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2ce1d6d..63176ea 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -249,8 +249,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= 
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -265,9 +265,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -293,8 +295,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -322,8 +324,8 @@ google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -339,5 +341,5 @@ gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -repository.basistech.ru/BASIS/decort-golang-sdk v1.6.5 h1:37OLNiSgF3SXjj0ZMRikVrBiNlNdS9NY8QxD9YFfHBY= -repository.basistech.ru/BASIS/decort-golang-sdk v1.6.5/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE= +repository.basistech.ru/BASIS/decort-golang-sdk v1.6.9 h1:FaQQol7gx4i/IzaScGbiVxJpLkBOgG2bOp+De1kW0hY= +repository.basistech.ru/BASIS/decort-golang-sdk v1.6.9/go.mod h1:mwcpnw0dT/PQf6AOJShjlbDNDfNitr0WM77LNKL1qjo= diff --git a/internal/service/cloudbroker/grid/api.go b/internal/dc/utils.go similarity index 61% rename from internal/service/cloudbroker/grid/api.go rename to internal/dc/utils.go index 17184f7..0f41e39 100644 --- a/internal/service/cloudbroker/grid/api.go +++ b/internal/dc/utils.go @@ -1,35 +1,65 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. 
- -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package grid - -const GridListGetAPI = "/restmachine/cloudbroker/grid/list" -const GridGetAPI = "/restmachine/cloudbroker/grid/get" +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. + +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Nikita Sorokin, +Tim Tkachev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package dc + +import "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + +func ErrorsToDiagnostics(errs []error) diag.Diagnostics { + if len(errs) == 0 { + return nil + } + + diags := diag.Diagnostics{} + + for _, err := range errs { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + }) + } + + return diags +} + +func ErrorsToWarnings(errs []error) Warnings { + w := Warnings{} + + for _, err := range errs { + w.Add(err) + } + + return w +} diff --git a/internal/provider/data_sources_map.go b/internal/provider/data_sources_map.go index 4b0a70d..3c6f18b 100644 --- a/internal/provider/data_sources_map.go +++ b/internal/provider/data_sources_map.go @@ -38,25 +38,25 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" - cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" + // cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" - cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid" - cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" - cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" - cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" - cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" - cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu" + // cb_flipgroup 
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" + // cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid" + // cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" + // cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" + // cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" + // cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" + // cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" + // cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack" + // cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu" ) func newDataSourcesMap() map[string]*schema.Resource { return map[string]*schema.Resource{ - "decort_account": account.DataSourceAccount(), - "decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(), - "decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(), + "decort_account": account.DataSourceAccount(), "decort_resgroup": rg.DataSourceResgroup(), "decort_kvmvm": kvmvm.DataSourceCompute(), "decort_kvmvm_list": kvmvm.DataSourceComputeList(), - "decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(), "decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(), "decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(), "decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(), @@ -64,15 +64,12 @@ func newDataSourcesMap() map[string]*schema.Resource { "decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(), "decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(), "decort_kvmvm_snapshot_usage": kvmvm.DataSourceComputeSnapshotUsage(), - 
"decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(), - "decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(), "decort_k8s": k8s.DataSourceK8s(), "decort_k8s_list": k8s.DataSourceK8sList(), "decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(), "decort_k8s_wg": k8s.DataSourceK8sWg(), "decort_k8s_wg_list": k8s.DataSourceK8sWgList(), "decort_k8s_computes": k8s.DataSourceK8sComputes(), - "decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(), "decort_k8ci_list": k8s.DataSourceK8CIList(), "decort_vins": vins.DataSourceVins(), "decort_vins_list": vins.DataSourceVinsList(), @@ -96,8 +93,6 @@ func newDataSourcesMap() map[string]*schema.Resource { "decort_rg_list_lb": rg.DataSourceRgListLb(), "decort_rg_list_pfw": rg.DataSourceRgListPfw(), "decort_rg_list_vins": rg.DataSourceRgListVins(), - "decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(), - "decort_rg_resource_consumption_get": rg.DataSourceRGResourceConsumptionGet(), "decort_rg_usage": rg.DataSourceRgUsage(), "decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(), "decort_disk_list_types": disks.DataSourceDiskListTypes(), @@ -137,36 +132,30 @@ func newDataSourcesMap() map[string]*schema.Resource { "decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(), "decort_stack": stack.DataSourceStack(), "decort_stack_list": stack.DataSourceStackList(), + "decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(), + "decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(), + "decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(), + "decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(), + "decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(), + "decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(), + "decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(), + "decort_rg_resource_consumption_get": 
rg.DataSourceRGResourceConsumptionGet(), - "decort_cb_account": cb_account.DataSourceAccount(), - "decort_cb_account_list": cb_account.DataSourceAccountList(), - "decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(), - "decort_cb_account_deleted_list": cb_account.DataSourceAccountDeletedList(), - "decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(), - "decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(), - "decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(), - "decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(), - "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(), - "decort_cb_extnet": cb_extnet.DataSourceExtnetCB(), - "decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(), - "decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(), - "decort_cb_disk": cb_disks.DataSourceDisk(), - "decort_cb_disk_list": cb_disks.DataSourceDiskList(), - "decort_cb_image": cb_image.DataSourceImage(), - "decort_cb_grid": cb_grid.DataSourceGrid(), - "decort_cb_grid_list": cb_grid.DataSourceGridList(), - "decort_cb_image_list": cb_image.DataSourceImageList(), - "decort_cb_image_list_stacks": cb_image.DataSourceImageListStacks(), - "decort_cb_pcidevice": cb_pcidevice.DataSourcePcidevice(), - "decort_cb_pcidevice_list": cb_pcidevice.DataSourcePcideviceList(), - "decort_cb_sep_list": cb_sep.DataSourceSepList(), - "decort_cb_sep": cb_sep.DataSourceSep(), - "decort_cb_sep_consumption": cb_sep.DataSourceSepConsumption(), - "decort_cb_sep_disk_list": cb_sep.DataSourceSepDiskList(), - "decort_cb_sep_config": cb_sep.DataSourceSepConfig(), - "decort_cb_sep_pool": cb_sep.DataSourceSepPool(), - "decort_cb_vgpu": cb_vgpu.DataSourceVGPU(), - "decort_cb_rg_list": cb_rg.DataSourceRgList(), + "decort_cb_account": cb_account.DataSourceAccount(), + "decort_cb_account_list": cb_account.DataSourceAccountList(), + "decort_cb_account_computes_list": 
cb_account.DataSourceAccountComputesList(), + "decort_cb_account_deleted_list": cb_account.DataSourceAccountDeletedList(), + "decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(), + "decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(), + "decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(), + "decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(), + "decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(), + "decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(), + "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(), + "decort_cb_extnet": cb_extnet.DataSourceExtnetCB(), + "decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(), + "decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(), + "decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(), + "decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(), } - } diff --git a/internal/provider/resource_map.go b/internal/provider/resource_map.go index c7b5067..e6a8ae2 100644 --- a/internal/provider/resource_map.go +++ b/internal/provider/resource_map.go @@ -36,17 +36,19 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" - cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" + // cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" - cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" - cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" - cb_kvmvm 
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" - cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" - cb_pfw "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pfw" - cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" - cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" - cb_snapshot "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/snapshot" - cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins" + // cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" + // cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" + // cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" + // cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" + // cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" + // cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" + // cb_pfw "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pfw" + // cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" + // cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" + // cb_snapshot "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/snapshot" + // cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins" ) func newResourcesMap() map[string]*schema.Resource { @@ -74,22 +76,29 @@ func newResourcesMap() 
map[string]*schema.Resource { "decort_flipgroup": flipgroup.ResourceFlipgroup(), "decort_vins_static_route": vins.ResourceStaticRoute(), - "decort_cb_account": cb_account.ResourceAccount(), - "decort_cb_extnet": cb_extnet.ResourceExtnetCB(), - "decort_cb_disk": cb_disks.ResourceDisk(), - "decort_cb_image": cb_image.ResourceImage(), - "decort_cb_virtual_image": cb_image.ResourceVirtualImage(), - "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(), - "decort_cb_delete_images": cb_image.ResourceDeleteImages(), - "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(), - "decort_cb_sep": cb_sep.ResourceSep(), - "decort_cb_sep_config": cb_sep.ResourceSepConfig(), - "decort_cb_resgroup": cb_rg.ResourceResgroup(), - "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(), - "decort_cb_vins": cb_vins.ResourceVins(), - "decort_cb_pfw": cb_pfw.ResourcePfw(), - "decort_cb_k8s": cb_k8s.ResourceK8s(), - "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(), - "decort_cb_snapshot": cb_snapshot.ResourceSnapshot(), + "decort_cb_account": cb_account.ResourceAccount(), + "decort_cb_extnet": cb_extnet.ResourceExtnetCB(), + // "decort_cb_disk": cb_disks.ResourceDisk(), + // "decort_cb_image": cb_image.ResourceImage(), + // "decort_cb_virtual_image": cb_image.ResourceVirtualImage(), + // "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(), + // "decort_cb_delete_images": cb_image.ResourceDeleteImages(), + // "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(), + // "decort_cb_sep": cb_sep.ResourceSep(), + // "decort_cb_sep_config": cb_sep.ResourceSepConfig(), + // "decort_cb_resgroup": cb_rg.ResourceResgroup(), + // "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(), + // "decort_cb_vins": cb_vins.ResourceVins(), + // "decort_cb_pfw": cb_pfw.ResourcePfw(), + // "decort_cb_k8s": cb_k8s.ResourceK8s(), + // "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(), + // "decort_cb_snapshot": cb_snapshot.ResourceSnapshot(), + // "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(), + // "decort_cb_lb": 
cb_lb.ResourceLB(), + // "decort_cb_lb_backend": cb_lb.ResourceLBBackend(), + // "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(), + // "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(), + // "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(), + "decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(), } } diff --git a/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go index e1b6c55..3d075b8 100644 --- a/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go +++ b/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go @@ -49,8 +49,8 @@ func dataSourceBasicServiceSnapshotListRead(ctx context.Context, d *schema.Resou id := uuid.New() d.SetId(id.String()) - d.Set("items", flattenBasicServiceSnapshots(basicServiceSnapshotList)) - + d.Set("items", flattenBasicServiceSnapshotsList(basicServiceSnapshotList)) + d.Set("entry_count", basicServiceSnapshotList.EntryCount) return nil } diff --git a/internal/service/cloudapi/bservice/flattens.go b/internal/service/cloudapi/bservice/flattens.go index 9042059..f3ef692 100644 --- a/internal/service/cloudapi/bservice/flattens.go +++ b/internal/service/cloudapi/bservice/flattens.go @@ -111,7 +111,7 @@ func flattenBasicServiceComputes(bscs bservice.ListComputes) []map[string]interf } func flattenBasicServiceSnapshots(bsrvss bservice.ListSnapshots) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(bsrvss)) for _, bsrvs := range bsrvss { temp := map[string]interface{}{ "guid": bsrvs.GUID, @@ -123,3 +123,17 @@ func flattenBasicServiceSnapshots(bsrvss bservice.ListSnapshots) []map[string]in } return res } + +func flattenBasicServiceSnapshotsList(bsrvss *bservice.ListInfoSnapshots) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(bsrvss.Data)) + for _, bsrvs := range bsrvss.Data { + 
temp := map[string]interface{}{ + "guid": bsrvs.GUID, + "label": bsrvs.Label, + "timestamp": bsrvs.Timestamp, + "valid": bsrvs.Valid, + } + res = append(res, temp) + } + return res +} diff --git a/internal/service/cloudapi/bservice/utility_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/utility_bservice_snapshot_list.go index b791f09..73c7083 100644 --- a/internal/service/cloudapi/bservice/utility_bservice_snapshot_list.go +++ b/internal/service/cloudapi/bservice/utility_bservice_snapshot_list.go @@ -42,7 +42,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func utilityBasicServiceSnapshotListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (bservice.ListSnapshots, error) { +func utilityBasicServiceSnapshotListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*bservice.ListInfoSnapshots, error) { c := m.(*controller.ControllerCfg) var id uint64 diff --git a/internal/service/cloudapi/k8s/old_schemas.go b/internal/service/cloudapi/k8s/old_schemas.go new file mode 100644 index 0000000..1554046 --- /dev/null +++ b/internal/service/cloudapi/k8s/old_schemas.go @@ -0,0 +1,396 @@ +package k8s + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceK8sCPSchemaV1() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the cluster.", + }, + "rg_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Resource group ID that this instance belongs to.", + }, + "k8sci_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "ID of the k8s catalog item to base this instance on.", + }, + "network_plugin": { + Type: schema.TypeString, + Required: true, + Description: "Network plugin to be used", + ValidateFunc: 
validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true), + }, + "num": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntInSlice([]int{1, 3}), + Description: "Number of VMs to create. Can be either 1 or 3", + }, + "cpu": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Node CPU count.", + }, + "ram": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Node RAM in MB.", + }, + "disk": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Node boot disk size in GB.", + }, + "sep_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Storage Endpoint ID", + }, + "sep_pool": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Storage Endpoint Pool", + }, + "with_lb": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Create k8s with load balancer if true.", + }, + "extnet_only": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Use only selected ExtNet for infrastructure connections", + }, + // /4.4.0 + "cloud_init": { + Type: schema.TypeString, + Optional: true, + Description: "Meta data for working group computes, format YAML 'user_data': 1111", + }, + "join_config": { + Type: schema.TypeString, + Optional: true, + Description: "is used to configure the behavior and settings for joining a node to a cluster. It includes parameters such as the cluster's control plane endpoint, token, and certificate key. insert a valid JSON string with all levels of nesting.", + }, + "kube_proxy_config": { + Type: schema.TypeString, + Optional: true, + Description: "is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. 
insert a valid JSON string with all levels of nesting.", + }, + "kubelet_config": { + Type: schema.TypeString, + Optional: true, + Description: "is used to configure the behavior and settings of the Kubelet, which is the primary node agent that runs on each node in the cluster. It includes parameters such as node IP address, resource allocation, pod eviction policies, and other Kubelet-specific configurations. insert a valid JSON string with all levels of nesting.", + }, + "cluster_config": { + Type: schema.TypeString, + Optional: true, + Description: "is used to define global settings and configurations for the entire cluster. It includes parameters such as cluster name, DNS settings, authentication methods, and other cluster-wide configurations. insert a valid JSON string with all levels of nesting.", + }, + "init_config": { + Type: schema.TypeString, + Optional: true, + Description: "is used to define settings and actions that should be performed before any other component in the cluster starts. It allows you to configure things like node registration, network setup, and other initialization tasks. insert a valid JSON string with all levels of nesting.", + }, + "additional_sans": { + Type: schema.TypeList, + Optional: true, + Description: "Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ha_mode": { + Type: schema.TypeBool, + Optional: true, + Description: "Use Highly Available schema for LB deploy", + }, + "lb_sysctl_params": { + Type: schema.TypeString, + Optional: true, + Description: "Custom sysctl values for Load Balancer instance. 
Applied on boot.", + }, + "oidc_cert": { + Type: schema.TypeString, + Optional: true, + Description: "insert ssl certificate in x509 pem format", + }, + //// + "extnet_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.", + }, + "desc": { + Type: schema.TypeString, + Optional: true, + Description: "Text description of this instance.", + }, + "start": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Start k8s cluster.", + }, + "detailed_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "def_gw": { + Type: schema.TypeString, + Computed: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "natable_vins_ip": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_network": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "master_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Master group ID.", + }, + "master_name": { + Type: schema.TypeString, + Computed: true, + Description: "Master group name.", + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + 
}, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "k8s_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "rg_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "bservice_id": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "k8s_ci_name": { + Type: schema.TypeString, + Computed: true, + }, + "lb_id": { + Type: schema.TypeInt, + Computed: true, + }, + "k8s_id": { + Type: schema.TypeInt, + Computed: true, + }, + 
"lb_ip": { + Type: schema.TypeString, + Computed: true, + Description: "IP address of default load balancer.", + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for cluster access.", + }, + "vins_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "ID of default vins for this instace.", + }, + }, + } +} diff --git a/internal/service/cloudapi/k8s/resource_k8s.go b/internal/service/cloudapi/k8s/resource_k8s.go index dac7178..119bce5 100644 --- a/internal/service/cloudapi/k8s/resource_k8s.go +++ b/internal/service/cloudapi/k8s/resource_k8s.go @@ -199,6 +199,12 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{ createReq.ExtNetID = 0 } + if vins, ok := d.GetOk("vins_id"); ok { + createReq.VinsId = uint64(vins.(int)) + } else { + createReq.VinsId = 0 + } + if desc, ok := d.GetOk("desc"); ok { createReq.Description = desc.(string) } diff --git a/internal/service/cloudapi/k8s/resource_k8s_cp.go b/internal/service/cloudapi/k8s/resource_k8s_cp.go index 0eee12c..1e3ca0d 100644 --- a/internal/service/cloudapi/k8s/resource_k8s_cp.go +++ b/internal/service/cloudapi/k8s/resource_k8s_cp.go @@ -158,9 +158,9 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac createReq.JoinConfiguration = joinConfig.(string) } - if cloudInit, ok := d.GetOk("cloud_init"); ok { - createReq.UserData = cloudInit.(string) - } + // if cloudInit, ok := d.GetOk("cloud_init"); ok { + // createReq.UserData = cloudInit.(string) + // } if initConfig, ok := d.GetOk("init_config"); ok { createReq.InitConfiguration = initConfig.(string) @@ -185,6 
+185,12 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac createReq.ExtNetID = 0 } + if vins, ok := d.GetOk("vins_id"); ok { + createReq.VinsId = uint64(vins.(int)) + } else { + createReq.VinsId = 0 + } + if desc, ok := d.GetOk("desc"); ok { createReq.Description = desc.(string) } @@ -625,12 +631,6 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema { Default: false, Description: "Use only selected ExtNet for infrastructure connections", }, - ///4.4.0 - "cloud_init": { - Type: schema.TypeString, - Optional: true, - Description: "Meta data for working group computes, format YAML 'user_data': 1111", - }, "join_config": { Type: schema.TypeString, Optional: true, @@ -794,6 +794,7 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema { }, "vins_id": { Type: schema.TypeInt, + Optional: true, Computed: true, Description: "ID of default vins for this instace.", }, @@ -802,7 +803,7 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema { func ResourceK8sCP() *schema.Resource { return &schema.Resource{ - SchemaVersion: 1, + SchemaVersion: 2, CreateContext: resourceK8sCPCreate, ReadContext: resourceK8sCPRead, @@ -822,5 +823,12 @@ func ResourceK8sCP() *schema.Resource { }, Schema: resourceK8sCPSchemaMake(), + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceK8sCPSchemaV1().CoreConfigSchema().ImpliedType(), + Upgrade: resourceK8sCPStateUpgradeV1, + Version: 1, + }, + }, } } diff --git a/internal/service/cloudapi/k8s/state_upgraders.go b/internal/service/cloudapi/k8s/state_upgraders.go new file mode 100644 index 0000000..dee4b02 --- /dev/null +++ b/internal/service/cloudapi/k8s/state_upgraders.go @@ -0,0 +1,15 @@ +package k8s + +import ( + "context" + + log "github.com/sirupsen/logrus" +) + +func resourceK8sCPStateUpgradeV1(ctx context.Context, rawState map[string]interface{}, meta any) (map[string]interface{}, error) { + log.Debug("resourceK8sCPStateUpgradeV1: upgrading state") + + delete(rawState, "cloud_init") + 
+ return rawState, nil +} diff --git a/internal/service/cloudapi/rg/data_source_rg.go b/internal/service/cloudapi/rg/data_source_rg.go index e857eb4..180eb05 100644 --- a/internal/service/cloudapi/rg/data_source_rg.go +++ b/internal/service/cloudapi/rg/data_source_rg.go @@ -393,7 +393,7 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema { } func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - rg, err := utilityDataResgroupCheckPresence(ctx, d, m) + rg, err := utilityResgroupCheckPresence(ctx, d, m) if err != nil { d.SetId("") // ensure ID is empty in this case return diag.FromErr(err) diff --git a/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go index 9d0f1b5..9f869cd 100644 --- a/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go +++ b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go @@ -82,8 +82,17 @@ func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema { "ids": { Type: schema.TypeList, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "node_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, }, }, diff --git a/internal/service/cloudapi/rg/flattens.go b/internal/service/cloudapi/rg/flattens.go index 79e6bae..d7c7746 100644 --- a/internal/service/cloudapi/rg/flattens.go +++ b/internal/service/cloudapi/rg/flattens.go @@ -461,7 +461,7 @@ func flattenRgListLb(listLb *rg.ListLB) []map[string]interface{} { } func flattenRgListPfw(listPfw *rg.ListPortForwards) []map[string]interface{} { - res := make([]map[string]interface{}, 0, len (listPfw.Data)) + res := make([]map[string]interface{}, 0, len(listPfw.Data)) for _, pfw := range listPfw.Data { temp := map[string]interface{}{ "public_port_end": pfw.PublicPortEnd, @@ 
-538,10 +538,25 @@ func flattenRgAffinityGroupComputes(list rg.ListAffinityGroupsComputes) []map[st func flattenRgListGroups(list *rg.ListAffinityGroups) []map[string]interface{} { res := make([]map[string]interface{}, 0, len(list.Data)) - for groupKey, groupVal := range list.Data { + for _, groupVal := range list.Data { + for label, ag := range groupVal { + temp := map[string]interface{}{ + "label": label, + "ids": flattenRgAffinityListGroup(ag), + } + res = append(res, temp) + } + } + + return res +} + +func flattenRgAffinityListGroup(list rg.ListAffinityGroup) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(list)) + for _, ag := range list { temp := map[string]interface{}{ - "label": groupKey, - "ids": groupVal, + "id": ag.ID, + "node_id": ag.NodeID, } res = append(res, temp) } @@ -564,10 +579,10 @@ func flattenRGResourceConsumptionList(rg *rg.ListResourceConsumption) []map[stri res := make([]map[string]interface{}, 0, len(rg.Data)) for _, rc := range rg.Data { temp := map[string]interface{}{ - "consumed": flattenResource(rc.Consumed), - "reserved": flattenResource(rc.Reserved), + "consumed": flattenResource(rc.Consumed), + "reserved": flattenResource(rc.Reserved), "resource_limits": flattenRgResourceLimits(rc.ResourceLimits), - "rg_id": rc.RGID, + "rg_id": rc.RGID, } res = append(res, temp) } diff --git a/internal/service/cloudapi/rg/resource_rg.go b/internal/service/cloudapi/rg/resource_rg.go index ab86d12..6be15ee 100644 --- a/internal/service/cloudapi/rg/resource_rg.go +++ b/internal/service/cloudapi/rg/resource_rg.go @@ -185,7 +185,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter } } } - } if defNet, ok := d.GetOk("def_net"); ok { @@ -380,7 +379,7 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter } if hasChanged { - rgData, err = utilityDataResgroupCheckPresence(ctx, d, m) + rgData, err = utilityResgroupCheckPresence(ctx, d, m) if err != nil { d.SetId("") return 
diag.FromErr(err) @@ -789,14 +788,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { Schema: aclSchemaMake(), }, }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, "deleted_by": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudapi/rg/utility_rg.go b/internal/service/cloudapi/rg/utility_rg.go index 1b6ed04..70c0313 100644 --- a/internal/service/cloudapi/rg/utility_rg.go +++ b/internal/service/cloudapi/rg/utility_rg.go @@ -63,21 +63,3 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m return rgData, nil } - -func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*rg.RecordResourceGroup, error) { - c := m.(*controller.ControllerCfg) - req := rg.GetRequest{ - RGID: uint64(d.Get("rg_id").(int)), - } - - if reason, ok := d.GetOk("reason"); ok { - req.Reason = reason.(string) - } - - rgData, err := c.CloudAPI().RG().Get(ctx, req) - if err != nil { - return nil, err - } - - return rgData, nil -} diff --git a/internal/service/cloudbroker/account/account_compute_ds_subresource.go b/internal/service/cloudbroker/account/account_compute_ds_subresource.go deleted file mode 100644 index 52103fe..0000000 --- a/internal/service/cloudbroker/account/account_compute_ds_subresource.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountComputeSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "cpus": { - Type: schema.TypeInt, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "compute_name": { - Type: schema.TypeString, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "registered": { - Type: schema.TypeBool, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "total_disks_size": { - Type: schema.TypeInt, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "user_managed": { - Type: schema.TypeBool, - Computed: true, - }, - "vins_connected": { - Type: 
schema.TypeInt, - Computed: true, - }, - } -} diff --git a/internal/service/cloudbroker/account/account_ds.go b/internal/service/cloudbroker/account/account_ds.go deleted file mode 100644 index 61d6205..0000000 --- a/internal/service/cloudbroker/account/account_ds.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - }, - "dc_location": { - Type: schema.TypeString, - Computed: true, - }, - "resources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "current": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - // "meta": { - // Type: schema.TypeList, - // Computed: true, - // Elem: &schema.Schema{ - // Type: schema.TypeString, - // }, - // }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - 
"right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "company": { - Type: schema.TypeString, - Computed: true, - }, - "companyurl": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deactivation_time": { - Type: schema.TypeFloat, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "displayname": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "send_access_emails": { - Type: schema.TypeBool, - Computed: true, - }, - // "service_account": { - // Type: schema.TypeBool, - // Computed: true, - // }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } - return res -} diff --git 
a/internal/service/cloudbroker/account/account_fg_ds_subresource.go b/internal/service/cloudbroker/account/account_fg_ds_subresource.go deleted file mode 100644 index 0e911b3..0000000 --- a/internal/service/cloudbroker/account/account_fg_ds_subresource.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountFlipGroupSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "default_gw": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "fg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "fg_name": { - Type: schema.TypeString, - Computed: true, - }, - "net_id": { - Type: schema.TypeInt, - Computed: true, - }, - "net_type": { - Type: schema.TypeString, - Computed: true, - }, - "netmask": { - Type: schema.TypeInt, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - } -} diff --git a/internal/service/cloudbroker/account/account_item_ds_subresource.go b/internal/service/cloudbroker/account/account_item_ds_subresource.go deleted file mode 100644 index 5c7eab9..0000000 --- a/internal/service/cloudbroker/account/account_item_ds_subresource.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital 
Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountItemSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "dc_location": { - Type: schema.TypeString, - Computed: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - 
}, - "company": { - Type: schema.TypeString, - Computed: true, - }, - "companyurl": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deactivation_time": { - Type: schema.TypeFloat, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "displayname": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "send_access_emails": { - Type: schema.TypeBool, - Computed: true, - }, - "service_account": { - Type: schema.TypeBool, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } -} diff --git a/internal/service/cloudbroker/account/account_rs.go b/internal/service/cloudbroker/account/account_rs.go index 97775f4..e9582b8 100644 --- a/internal/service/cloudbroker/account/account_rs.go +++ b/internal/service/cloudbroker/account/account_rs.go @@ -56,6 +56,14 @@ 
func resourceAccountSchemaMake() map[string]*schema.Schema { Default: true, Description: "if true send emails when a user is granted access to resources", }, + "uniq_pools": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "users": { Type: schema.TypeList, Optional: true, @@ -77,6 +85,18 @@ func resourceAccountSchemaMake() map[string]*schema.Schema { }, }, }, + "cpu_allocation_parameter": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "set cpu allocation parameter", + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "set cpu allocation ratio", + }, "restore": { Type: schema.TypeBool, Optional: true, @@ -91,6 +111,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema { "enable": { Type: schema.TypeBool, Optional: true, + Default: true, Description: "enable/disable account", }, "resource_limits": { @@ -106,6 +127,10 @@ func resourceAccountSchemaMake() map[string]*schema.Schema { Computed: true, }, "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { Type: schema.TypeFloat, Optional: true, Computed: true, @@ -142,78 +167,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, - "resources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "current": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "reserved": { - Type: 
schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, "ckey": { Type: schema.TypeString, Computed: true, @@ -293,10 +246,13 @@ func resourceAccountSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, - // "service_account": { - // Type: schema.TypeBool, - // Computed: true, - // }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "status": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/account/account_vins_ds_subresource.go b/internal/service/cloudbroker/account/account_vins_ds_subresource.go deleted file mode 100644 index 2dba835..0000000 --- a/internal/service/cloudbroker/account/account_vins_ds_subresource.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountVinsSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "computes": { - Type: schema.TypeInt, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "external_ip": { - Type: schema.TypeString, - Computed: true, - }, - "vin_id": { - Type: schema.TypeInt, - Computed: true, - }, - "vin_name": { - Type: schema.TypeString, - Computed: true, - }, - "network": { - Type: schema.TypeString, - Computed: true, - }, - "pri_vnf_dev_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - } -} diff --git a/internal/service/cloudbroker/account/api.go b/internal/service/cloudbroker/account/api.go deleted file mode 100644 index 3c102e9..0000000 --- a/internal/service/cloudbroker/account/api.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright (c) 2019-2022 
Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -const accountAddUserAPI = "/restmachine/cloudbroker/account/addUser" -const accountAuditsAPI = "/restmachine/cloudbroker/account/audits" -const accountCreateAPI = "/restmachine/cloudbroker/account/create" -const accountDeleteAPI = "/restmachine/cloudbroker/account/delete" -const accountDeleteUserAPI = "/restmachine/cloudbroker/account/deleteUser" -const accountDisableAPI = "/restmachine/cloudbroker/account/disable" -const accountEnableAPI = "/restmachine/cloudbroker/account/enable" -const accountGetAPI = "/restmachine/cloudbroker/account/get" -const accountListAPI = "/restmachine/cloudbroker/account/list" -const accountListComputesAPI = "/restmachine/cloudbroker/account/listComputes" -const accountListDeletedAPI = "/restmachine/cloudbroker/account/listDeleted" -const accountListDisksAPI = "/restmachine/cloudbroker/account/listDisks" -const accountListFlipGroupsAPI = 
"/restmachine/cloudbroker/account/listFlipGroups" -const accountListRGAPI = "/restmachine/cloudbroker/account/listRG" -const accountListVinsAPI = "/restmachine/cloudbroker/account/listVins" -const accountRestoreAPI = "/restmachine/cloudbroker/account/restore" -const accountUpdateAPI = "/restmachine/cloudbroker/account/update" -const accountUpdateUserAPI = "/restmachine/cloudbroker/account/updateUser" - -//currently unused -//const accountsEnableAPI = "/restmachine/cloudbroker/account/enableAccounts" -//const accountsDisableAPI = "/restmachine/cloudbroker/account/disableAccounts" -//const accountsDeleteAPI = "/restmachine/cloudbroker/account/deleteAccounts" diff --git a/internal/service/cloudbroker/account/data_source_account.go b/internal/service/cloudbroker/account/data_source_account.go index a46df59..65bd378 100644 --- a/internal/service/cloudbroker/account/data_source_account.go +++ b/internal/service/cloudbroker/account/data_source_account.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Nikita Sorokin, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -66,3 +67,174 @@ func DataSourceAccount() *schema.Resource { Schema: dataSourceAccountSchemaMake(), } } + +func dataSourceAccountSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Required: true, + }, + "dc_location": { + Type: schema.TypeString, + Computed: true, + }, + "ckey": { + Type: schema.TypeString, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "company": { + Type: schema.TypeString, + Computed: true, + }, + "companyurl": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_parameter": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deactivation_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "displayname": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: 
true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "send_access_emails": { + Type: schema.TypeBool, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + } +} + diff --git a/internal/service/cloudbroker/account/data_source_account_audits_list.go b/internal/service/cloudbroker/account/data_source_account_audits_list.go index 3474cdb..ae49ff8 100644 --- a/internal/service/cloudbroker/account/data_source_account_audits_list.go +++ b/internal/service/cloudbroker/account/data_source_account_audits_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -65,7 +65,28 @@ func dataSourceAccountAuditsListSchemaMake() map[string]*schema.Schema { Computed: true, Description: "Search Result", Elem: &schema.Resource{ - Schema: dataSourceAccountAuditSchemaMake(), + Schema: map[string]*schema.Schema{ + "call": { + Type: schema.TypeString, + Computed: true, + }, + "responsetime": { + Type: schema.TypeFloat, + Computed: true, + }, + "statuscode": { + Type: schema.TypeInt, + Computed: true, + }, + "timestamp": { + Type: schema.TypeFloat, + Computed: true, + }, + "user": { + Type: schema.TypeString, + Computed: true, + }, + }, }, }, } @@ -85,4 +106,4 @@ func DataSourceAccountAuditsList() *schema.Resource { Schema: dataSourceAccountAuditsListSchemaMake(), } -} +} \ No newline at end of file diff --git a/internal/service/cloudbroker/account/data_source_account_computes_list.go b/internal/service/cloudbroker/account/data_source_account_computes_list.go index 1512336..78e61fc 100644 --- a/internal/service/cloudbroker/account/data_source_account_computes_list.go +++ b/internal/service/cloudbroker/account/data_source_account_computes_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -49,6 +49,7 @@ func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceDa id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountComputesList(accountComputesList)) + d.Set("entry_count", accountComputesList.EntryCount) return nil } @@ -60,18 +61,154 @@ func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema { Required: true, Description: "ID of the account", }, + "compute_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by compute ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by compute name", + }, + "rg_name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by RG name", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by RG ID", + }, + "tech_status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by tech. status", + }, + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by IP address", + }, + "extnet_name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by extnet name", + }, + "extnet_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by extnet ID", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, "items": { Type: schema.TypeList, Computed: true, Description: "Search Result", Elem: &schema.Resource{ - Schema: dataSourceAccountComputeSchemaMake(), + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "cpus": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": 
{ + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "compute_name": { + Type: schema.TypeString, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "total_disks_size": { + Type: schema.TypeInt, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "vins_connected": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } + func DataSourceAccountComputesList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -85,4 +222,4 @@ func DataSourceAccountComputesList() *schema.Resource { Schema: dataSourceAccountComputesListSchemaMake(), } -} +} \ No newline at end of file diff --git a/internal/service/cloudbroker/account/data_source_account_deleted_list.go b/internal/service/cloudbroker/account/data_source_account_deleted_list.go index dc2b700..61838fa 100644 --- a/internal/service/cloudbroker/account/data_source_account_deleted_list.go +++ b/internal/service/cloudbroker/account/data_source_account_deleted_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -40,6 +40,21 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) +func DataSourceAccountDeletedList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountDeletedListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountListDeletedSchemaMake(), + } +} + func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m) if err != nil { @@ -49,21 +64,221 @@ func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceDat id := uuid.New() d.SetId(id.String()) d.Set("items", flattenListDeleted(accountDeletedList)) + d.Set("entry_count", accountDeletedList.EntryCount) return nil } -func DataSourceAccountDeletedList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountDeletedListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, +func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by ACL", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dc_location": { + Type: schema.TypeString, + Computed: true, + }, + "ckey": { + Type: schema.TypeString, + 
Computed: true, + }, + "meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "company": { + Type: schema.TypeString, + Computed: true, + }, + "companyurl": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_parameter": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deactivation_time": { + Type: schema.TypeFloat, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "displayname": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": 
{ + Type: schema.TypeFloat, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "send_access_emails": { + Type: schema.TypeBool, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, }, - - Schema: dataSourceAccountListSchemaMake(), } } diff --git a/internal/service/cloudbroker/account/data_source_account_disks_list.go b/internal/service/cloudbroker/account/data_source_account_disks_list.go index 5df1c6e..d30f2e3 100644 --- a/internal/service/cloudbroker/account/data_source_account_disks_list.go +++ b/internal/service/cloudbroker/account/data_source_account_disks_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -48,6 +48,7 @@ func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountDisksList(accountDisksList)) + d.Set("entry_count", accountDisksList.EntryCount) return nil } @@ -59,14 +60,77 @@ func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema { Required: true, Description: "ID of the account", }, + "disk_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by disk ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by disk name", + }, + "disk_max_size": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by disk max size", + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by disk type", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, "items": { Type: schema.TypeList, Computed: true, Description: "Search Result", Elem: &schema.Resource{ - Schema: dataSourceAccountDiskSchemaMake(), + Schema: map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_name": { + Type: schema.TypeString, + Computed: true, + }, + "pool_name": { + Type: schema.TypeString, + Computed: true, + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } diff --git a/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go b/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go index 0001ac9..df151a1 100644 --- 
a/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go +++ b/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -49,6 +49,7 @@ func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.Resource id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList)) + d.Set("entry_count", accountFlipGroupsList.EntryCount) return nil } @@ -60,14 +61,143 @@ func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema { Required: true, Description: "ID of the account", }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "vins_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ViNS ID", + }, + "vins_name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by ViNS name", + }, + "extnet_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by extnet ID", + }, + "by_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by IP", + }, + "flipgroup_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by flipgroup ID", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, "items": { Type: schema.TypeList, Computed: true, Description: "Search Result", Elem: &schema.Resource{ - Schema: dataSourceAccountFlipGroupSchemaMake(), + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "client_type": { + Type: schema.TypeString, + Computed: true, + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + 
Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "default_gw": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "fg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "fg_name": { + Type: schema.TypeString, + Computed: true, + }, + "net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "net_type": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeInt, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } @@ -85,4 +215,4 @@ func DataSourceAccountFlipGroupsList() *schema.Resource { Schema: dataSourceAccountFlipGroupsListSchemaMake(), } -} +} \ No newline at end of file diff --git a/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go b/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go new file mode 100644 index 0000000..baef099 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go @@ -0,0 +1,236 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + flattenResourceConsumption(d, accountResourceConsumptionRec) + return nil +} + +func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Required: true, + }, + "consumed": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: 
true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceAccountResourceConsumptionGet() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountResourceConsumptionGetRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountResourceConsumptionGetSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_list.go b/internal/service/cloudbroker/account/data_source_account_list.go index ebbcab0..f038320 100644 --- a/internal/service/cloudbroker/account/data_source_account_list.go +++ b/internal/service/cloudbroker/account/data_source_account_list.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Nikita Sorokin, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -40,6 +41,21 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) +func DataSourceAccountList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountListSchemaMake(), + } +} + func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { accountList, err := utilityAccountListCheckPresence(ctx, d, m) if err != nil { @@ -49,12 +65,33 @@ func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m in id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountList(accountList)) + d.Set("entry_count", accountList.EntryCount) return nil } func dataSourceAccountListSchemaMake() map[string]*schema.Schema { res := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by ACL", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by status", + }, "page": { Type: schema.TypeInt, Optional: true, @@ -69,24 +106,187 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: dataSourceAccountItemSchemaMake(), + Schema: map[string]*schema.Schema{ + "dc_location": { + Type: schema.TypeString, + Computed: true, + }, + "ckey": { + Type: schema.TypeString, + Computed: true, + }, + "meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: 
schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "company": { + Type: schema.TypeString, + Computed: true, + }, + "companyurl": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_parameter": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deactivation_time": { + Type: schema.TypeFloat, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "displayname": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + 
"send_access_emails": { + Type: schema.TypeBool, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } -func DataSourceAccountList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountListSchemaMake(), - } -} diff --git a/internal/service/cloudbroker/account/account_rg_ds_subresource.go b/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go similarity index 51% rename from internal/service/cloudbroker/account/account_rg_ds_subresource.go rename to internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go index e2c43f9..a58f212 100644 --- a/internal/service/cloudbroker/account/account_rg_ds_subresource.go +++ b/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go @@ -1,205 +1,212 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountRGSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "started": { - Type: schema.TypeInt, - Computed: true, - }, - "stopped": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "resources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - - "limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vinses": { - Type: schema.TypeInt, - Computed: true, - }, - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccResourceConsumption(accountResourceConsumptionList)) + d.Set("entry_count", accountResourceConsumptionList.EntryCount) + return nil +} + +func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "consumed": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func DataSourceAccountResourceConsumptionList() 
*schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountResourceConsumptionListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountResourceConsumptionListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_rg_list.go b/internal/service/cloudbroker/account/data_source_account_rg_list.go index 64c8ee7..c30390e 100644 --- a/internal/service/cloudbroker/account/data_source_account_rg_list.go +++ b/internal/service/cloudbroker/account/data_source_account_rg_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -40,6 +40,21 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) +func DataSourceAccountRGList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountRGListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountRGListSchemaMake(), + } +} + func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m) if err != nil { @@ -49,6 +64,7 @@ func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountRGList(accountRGList)) + d.Set("entry_count", accountRGList.EntryCount) return nil } @@ -60,6 +76,42 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema { Required: true, Description: "ID of the account", }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + 
Optional: true, + Description: "Page size", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by RG ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "vins_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ViNS ID", + }, + "vm_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by VM ID", + }, + + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by status", + }, "items": { Type: schema.TypeList, Computed: true, @@ -68,21 +120,243 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema { Schema: dataSourceAccountRGSchemaMake(), }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } -func DataSourceAccountRGList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountRGListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, +func dataSourceAccountRGSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "started": { + Type: schema.TypeInt, + Computed: true, + }, + "stopped": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consumed": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: 
schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeInt, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeInt, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disksize": { + Type: schema.TypeFloat, + Computed: true, + }, + "disksizemax": { + Type: schema.TypeFloat, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + 
}, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vinses": { + Type: schema.TypeInt, + Computed: true, }, - - Schema: dataSourceAccountRGListSchemaMake(), } } diff --git a/internal/service/cloudbroker/account/data_source_account_vins_list.go b/internal/service/cloudbroker/account/data_source_account_vins_list.go index 3f088ac..d87228f 100644 --- a/internal/service/cloudbroker/account/data_source_account_vins_list.go +++ b/internal/service/cloudbroker/account/data_source_account_vins_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -49,6 +49,7 @@ func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, id := uuid.New() d.SetId(id.String()) d.Set("items", flattenAccountVinsList(accountVinsList)) + d.Set("entry_count", accountVinsList.EntryCount) return nil } @@ -60,14 +61,117 @@ func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema { Required: true, Description: "ID of the account", }, + "vins_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ViNS ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by RG ID", + }, + "ext_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by external IP", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, "items": { Type: schema.TypeList, Computed: true, Description: "Search Result", Elem: &schema.Resource{ - Schema: dataSourceAccountVinsSchema(), + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "computes": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "external_ip": { + Type: schema.TypeString, + Computed: true, + }, + "vin_id": { + Type: schema.TypeInt, + Computed: true, + }, + "vin_name": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Computed: true, + }, + "pri_vnf_dev_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_id": 
{ + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } @@ -85,4 +189,4 @@ func DataSourceAccountVinsList() *schema.Resource { Schema: dataSourceAccountVinsListSchemaMake(), } -} +} \ No newline at end of file diff --git a/internal/service/cloudbroker/account/flattens.go b/internal/service/cloudbroker/account/flattens.go index 39ccf95..a5576be 100644 --- a/internal/service/cloudbroker/account/flattens.go +++ b/internal/service/cloudbroker/account/flattens.go @@ -8,11 +8,12 @@ import ( func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount) { d.Set("dc_location", acc.DCLocation) - // d.Set("resources", flattenAccResources(acc.Resources)) d.Set("ckey", acc.CKey) d.Set("acl", flattenAccAcl(acc.ACL)) d.Set("company", acc.Company) d.Set("companyurl", acc.CompanyURL) + d.Set("cpu_allocation_parameter", acc.CPUAllocationParameter) + d.Set("cpu_allocation_ratio", acc.CPUAllocationRatio) d.Set("created_by", acc.CreatedBy) d.Set("created_time", acc.CreatedTime) d.Set("deactivation_time", acc.DeactivationTime) @@ -23,8 +24,10 @@ func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount) d.Set("account_id", acc.ID) d.Set("account_name", acc.Name) d.Set("resource_limits", flattenRgResourceLimits(acc.ResourceLimits)) + d.Set("resource_types", acc.ResTypes) d.Set("send_access_emails", acc.SendAccessEmails) d.Set("status", acc.Status) + d.Set("uniq_pools", acc.UniqPools) d.Set("updated_time", acc.UpdatedTime) d.Set("version", acc.Version) d.Set("vins", acc.VINS) @@ -32,12 +35,12 @@ func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount) func flattenDataAccount(d 
*schema.ResourceData, acc *account.RecordAccount) { d.Set("dc_location", acc.DCLocation) - // d.Set("resources", flattenAccResources(acc.Resources)) d.Set("ckey", acc.CKey) - // d.Set("meta", flattens.FlattenMeta(acc.)) d.Set("acl", flattenAccAcl(acc.ACL)) d.Set("company", acc.Company) d.Set("companyurl", acc.CompanyURL) + d.Set("cpu_allocation_parameter", acc.CPUAllocationParameter) + d.Set("cpu_allocation_ratio", acc.CPUAllocationRatio) d.Set("created_by", acc.CreatedBy) d.Set("created_time", acc.CreatedTime) d.Set("deactivation_time", acc.DeactivationTime) @@ -48,9 +51,10 @@ func flattenDataAccount(d *schema.ResourceData, acc *account.RecordAccount) { d.Set("account_id", acc.ID) d.Set("account_name", acc.Name) d.Set("resource_limits", flattenRgResourceLimits(acc.ResourceLimits)) + d.Set("resource_types", acc.ResTypes) d.Set("send_access_emails", acc.SendAccessEmails) - // d.Set("service_account", acc.ServiceAccount) d.Set("status", acc.Status) + d.Set("uniq_pools", acc.UniqPools) d.Set("updated_time", acc.UpdatedTime) d.Set("version", acc.Version) d.Set("vins", acc.VINS) @@ -94,7 +98,7 @@ func flattenAccRGComputes(argc account.Computes) []map[string]interface{} { func flattenAccRGResources(argr account.RGResuorces) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "consumed": flattenAccConsumed(argr.Consumed), + "consumed": flattenAccResource(argr.Consumed), "limits": flattenAccLimits(argr.Limits), "reserved": flattenAccResource(argr.Reserved), } @@ -102,39 +106,17 @@ func flattenAccRGResources(argr account.RGResuorces) []map[string]interface{} { return res } -func flattenAccResources(r account.RecordResourceConsumption) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "current": flattenAccResource(r.Current), - "reserved": flattenAccResource(r.Reserved), - } - res = append(res, temp) - return res -} - -func flattenAccConsumed(c account.Consumed) 
[]map[string]interface{} { - res := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "cpu": c.CPU, - "disksize": c.DiskSize, - "extips": c.ExtIPs, - "exttraffic": c.ExtTraffic, - "gpu": c.GPU, - "ram": c.RAM, - } - res = append(res, temp) - return res -} - func flattenAccLimits(l account.Limits) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "cpu": l.CPU, - "disksize": l.DiskSize, - "extips": l.ExtIPs, - "exttraffic": l.ExtTraffic, - "gpu": l.GPU, - "ram": l.RAM, + "cpu": l.CPU, + "disksize": l.DiskSize, + "disksizemax": l.DiskSizeMax, + "extips": l.ExtIPs, + "exttraffic": l.ExtTraffic, + "gpu": l.GPU, + "ram": l.RAM, + "seps": l.SEPs, } res = append(res, temp) return res @@ -143,12 +125,14 @@ func flattenAccLimits(l account.Limits) []map[string]interface{} { func flattenAccResource(r account.Resource) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "cpu": r.CPU, - "disksize": r.DiskSize, - "extips": r.ExtIPs, - "exttraffic": r.ExtTraffic, - "gpu": r.GPU, - "ram": r.RAM, + "cpu": r.CPU, + "disksize": r.DiskSize, + "disksizemax": r.DiskSizeMax, + "extips": r.ExtIPs, + "exttraffic": r.ExtTraffic, + "gpu": r.GPU, + "ram": r.RAM, + "seps": flattenAccountSeps(r.SEPs), } res = append(res, temp) return res @@ -175,6 +159,7 @@ func flattenRgResourceLimits(rl account.ResourceLimits) []map[string]interface{} temp := map[string]interface{}{ "cu_c": rl.CuC, "cu_d": rl.CuD, + "cu_dm": rl.CuDM, "cu_i": rl.CuI, "cu_m": rl.CuM, "cu_np": rl.CuNP, @@ -186,7 +171,7 @@ func flattenRgResourceLimits(rl account.ResourceLimits) []map[string]interface{} } func flattenRgAcl(rgAcls []account.ACL) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, len(rgAcls)) for _, rgAcl := range rgAcls { temp := map[string]interface{}{ "explicit": rgAcl.Explicit, @@ -205,38 +190,31 @@ func flattenListDeleted(al 
*account.ListAccounts) []map[string]interface{} { res := make([]map[string]interface{}, 0) for _, acc := range al.Data { temp := map[string]interface{}{ - "dc_location": acc.DCLocation, - "ckey": acc.CKey, - "meta": flattens.FlattenMeta(acc.Meta), - - "acl": flattenRgAcl(acc.ACL), - - "company": acc.Company, - "companyurl": acc.CompanyURL, - "created_by": acc.CreatedBy, - - "created_time": acc.CreatedTime, - - "deactivation_time": acc.DeactivationTime, - "deleted_by": acc.DeletedBy, - - "deleted_time": acc.DeletedTime, - - "displayname": acc.DisplayName, - "guid": acc.GUID, - - "account_id": acc.ID, - "account_name": acc.Name, - - "resource_limits": flattenRgResourceLimits(acc.ResourceLimits), - "send_access_emails": acc.SendAccessEmails, - // "service_account": acc.ServiceAccount, - - "status": acc.Status, - "updated_time": acc.UpdatedTime, - - "version": acc.Version, - "vins": acc.VINS, + "dc_location": acc.DCLocation, + "ckey": acc.CKey, + "meta": flattens.FlattenMeta(acc.Meta), + "acl": flattenRgAcl(acc.ACL), + "company": acc.Company, + "companyurl": acc.CompanyURL, + "cpu_allocation_parameter": acc.CPUAllocationParameter, + "cpu_allocation_ratio": acc.CPUAllocationRatio, + "created_by": acc.CreatedBy, + "created_time": acc.CreatedTime, + "deactivation_time": acc.DeactivationTime, + "deleted_by": acc.DeletedBy, + "deleted_time": acc.DeletedTime, + "displayname": acc.DisplayName, + "guid": acc.GUID, + "account_id": acc.ID, + "account_name": acc.Name, + "resource_limits": flattenRgResourceLimits(acc.ResourceLimits), + "resource_types": acc.ResTypes, + "send_access_emails": acc.SendAccessEmails, + "status": acc.Status, + "uniq_pools": acc.UniqPools, + "updated_time": acc.UpdatedTime, + "version": acc.Version, + "vins": acc.VINS, } res = append(res, temp) } @@ -247,38 +225,31 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} { res := make([]map[string]interface{}, 0) for _, acc := range al.Data { temp := map[string]interface{}{ - 
"dc_location": acc.DCLocation, - "ckey": acc.CKey, - "meta": flattens.FlattenMeta(acc.Meta), - - "acl": flattenRgAcl(acc.ACL), - - "company": acc.Company, - "companyurl": acc.CompanyURL, - "created_by": acc.CreatedBy, - - "created_time": acc.CreatedTime, - - "deactivation_time": acc.DeactivationTime, - "deleted_by": acc.DeletedBy, - - "deleted_time": acc.DeletedTime, - - "displayname": acc.DisplayName, - "guid": acc.GUID, - - "account_id": acc.ID, - "account_name": acc.Name, - - "resource_limits": flattenRgResourceLimits(acc.ResourceLimits), - "send_access_emails": acc.SendAccessEmails, - // "service_account": acc.ServiceAccount, - - "status": acc.Status, - "updated_time": acc.UpdatedTime, - - "version": acc.Version, - "vins": acc.VINS, + "dc_location": acc.DCLocation, + "ckey": acc.CKey, + "meta": flattens.FlattenMeta(acc.Meta), + "acl": flattenRgAcl(acc.ACL), + "company": acc.Company, + "companyurl": acc.CompanyURL, + "cpu_allocation_parameter": acc.CPUAllocationParameter, + "cpu_allocation_ratio": acc.CPUAllocationRatio, + "created_by": acc.CreatedBy, + "created_time": acc.CreatedTime, + "deactivation_time": acc.DeactivationTime, + "deleted_by": acc.DeletedBy, + "deleted_time": acc.DeletedTime, + "displayname": acc.DisplayName, + "guid": acc.GUID, + "account_id": acc.ID, + "account_name": acc.Name, + "resource_limits": flattenRgResourceLimits(acc.ResourceLimits), + "resource_types": acc.ResTypes, + "send_access_emails": acc.SendAccessEmails, + "status": acc.Status, + "uniq_pools": acc.UniqPools, + "updated_time": acc.UpdatedTime, + "version": acc.Version, + "vins": acc.VINS, } res = append(res, temp) } @@ -338,6 +309,7 @@ func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} { "disk_name": ad.Name, "pool_name": ad.Pool, "sep_id": ad.SepID, + "shareable": ad.Shareable, "size_max": ad.SizeMax, "type": ad.Type, } @@ -403,3 +375,39 @@ func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} { } return res } + +func 
flattenResourceConsumption(d *schema.ResourceData, acc *account.RecordResourceConsumption) { + d.Set("account_id", acc.AccountID) + d.Set("consumed", flattenAccResource(acc.Consumed)) + d.Set("reserved", flattenAccResource(acc.Reserved)) + d.Set("resource_limits", flattenRgResourceLimits(acc.ResourceLimits)) +} + +func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for sepKey, sepVal := range seps { + for dataKey, dataVal := range sepVal { + temp := map[string]interface{}{ + "sep_id": sepKey, + "data_name": dataKey, + "disk_size": dataVal.DiskSize, + "disk_size_max": dataVal.DiskSizeMax, + } + res = append(res, temp) + } + } + return res +} + +func flattenAccResourceConsumption(lrc *account.ListResources) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(lrc.Data)) + for _, rc := range lrc.Data { + temp := map[string]interface{}{ + "consumed": flattenAccResource(rc.Consumed), + "reserved": flattenAccResource(rc.Reserved), + "account_id": rc.AccountID, + } + res = append(res, temp) + } + return res +} diff --git a/internal/service/cloudbroker/account/models.go b/internal/service/cloudbroker/account/models.go deleted file mode 100644 index 1c45d74..0000000 --- a/internal/service/cloudbroker/account/models.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -type AccountAclRecord struct { - IsExplicit bool `json:"explicit"` - Guid string `json:"guid"` - Rights string `json:"right"` - Status string `json:"status"` - Type string `json:"type"` - UgroupID string `json:"userGroupId"` -} - -type ResourceLimits struct { - CUC float64 `json:"CU_C"` - CUD float64 `json:"CU_D"` - CUI float64 `json:"CU_I"` - CUM float64 `json:"CU_M"` - CUNP float64 `json:"CU_NP"` - GpuUnits float64 `json:"gpu_units"` -} - -type Account struct { - DCLocation string `json:"DCLocation"` - CKey string `jspn:"_ckey"` - Meta []interface{} `json:"_meta"` - Acl []AccountAclRecord `json:"acl"` - Company string `json:"company"` - CompanyUrl string `json:"companyurl"` - CreatedBy string `jspn:"createdBy"` - CreatedTime int `json:"createdTime"` - DeactiovationTime float64 `json:"deactivationTime"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - DisplayName string `json:"displayname"` - GUID int `json:"guid"` - ID int `json:"id"` - Name string `json:"name"` - ResourceLimits ResourceLimits `json:"resourceLimits"` - SendAccessEmails bool `json:"sendAccessEmails"` - ServiceAccount bool `json:"serviceAccount"` - Status string `json:"status"` - UpdatedTime int `json:"updatedTime"` - Version int `json:"version"` - Vins []int `json:"vins"` -} - -type AccountList []Account - -type Resource struct { - CPU int `json:"cpu"` - Disksize int `json:"disksize"` - Extips int `json:"extips"` - Exttraffic int 
`json:"exttraffic"` - GPU int `json:"gpu"` - RAM int `json:"ram"` -} - -type Resources struct { - Current Resource `json:"Current"` - Reserved Resource `json:"Reserved"` -} - -type AccountWithResources struct { - Account - Resources Resources `json:"Resources"` -} - -type AccountCompute struct { - AccountId int `json:"accountId"` - AccountName string `json:"accountName"` - CPUs int `json:"cpus"` - CreatedBy string `json:"createdBy"` - CreatedTime int `json:"createdTime"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - ComputeId int `json:"id"` - ComputeName string `json:"name"` - RAM int `json:"ram"` - Registered bool `json:"registered"` - RgId int `json:"rgId"` - RgName string `json:"rgName"` - Status string `json:"status"` - TechStatus string `json:"techStatus"` - TotalDisksSize int `json:"totalDisksSize"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime int `json:"updatedTime"` - UserManaged bool `json:"userManaged"` - VinsConnected int `json:"vinsConnected"` -} - -type AccountComputesList []AccountCompute - -type AccountDisk struct { - ID int `json:"id"` - Name string `json:"name"` - Pool string `json:"pool"` - SepId int `json:"sepId"` - SizeMax int `json:"sizeMax"` - Type string `json:"type"` -} - -type AccountDisksList []AccountDisk - -type AccountVin struct { - AccountId int `json:"accountId"` - AccountName string `json:"accountName"` - Computes int `json:"computes"` - CreatedBy string `json:"createdBy"` - CreatedTime int `json:"createdTime"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - ExternalIP string `json:"externalIP"` - ID int `json:"id"` - Name string `json:"name"` - Network string `json:"network"` - PriVnfDevId int `json:"priVnfDevId"` - RgId int `json:"rgId"` - RgName string `json:"rgName"` - Status string `json:"status"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime int `json:"updatedTime"` -} - -type AccountVinsList []AccountVin - -type AccountAudit struct { - Call string 
`json:"call"` - ResponseTime float64 `json:"responsetime"` - StatusCode int `json:"statuscode"` - Timestamp float64 `json:"timestamp"` - User string `json:"user"` -} - -type AccountAuditsList []AccountAudit - -type AccountRGComputes struct { - Started int `json:"Started"` - Stopped int `json:"Stopped"` -} - -type AccountRGResources struct { - Consumed Resource `json:"Consumed"` - Limits Resource `json:"Limits"` - Reserved Resource `json:"Reserved"` -} - -type AccountRG struct { - Computes AccountRGComputes `json:"Computes"` - Resources AccountRGResources `json:"Resources"` - CreatedBy string `json:"createdBy"` - CreatedTime int `json:"createdTime"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - RGID int `json:"id"` - Milestones int `json:"milestones"` - RGName string `json:"name"` - Status string `json:"status"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime int `json:"updatedTime"` - Vinses int `json:"vinses"` -} - -type AccountRGList []AccountRG - -type AccountFlipGroup struct { - AccountId int `json:"accountId"` - ClientType string `json:"clientType"` - ConnType string `json:"connType"` - CreatedBy string `json:"createdBy"` - CreatedTime int `json:"createdTime"` - DefaultGW string `json:"defaultGW"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - Desc string `json:"desc"` - GID int `json:"gid"` - GUID int `json:"guid"` - ID int `json:"id"` - IP string `json:"ip"` - Milestones int `json:"milestones"` - Name string `json:"name"` - NetID int `json:"netId"` - NetType string `json:"netType"` - NetMask int `json:"netmask"` - Status string `json:"status"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime int `json:"updatedTime"` -} - -type AccountFlipGroupsList []AccountFlipGroup diff --git a/internal/service/cloudbroker/account/resource_account.go b/internal/service/cloudbroker/account/resource_account.go index 6e218df..faefeed 100644 --- a/internal/service/cloudbroker/account/resource_account.go 
+++ b/internal/service/cloudbroker/account/resource_account.go @@ -42,12 +42,15 @@ import ( "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" ) func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceAccountCreate") + log.Debugf("resourseAccountCreate") c := m.(*controller.ControllerCfg) + req := account.CreateRequest{} req.Name = d.Get("account_name").(string) @@ -56,57 +59,63 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf if emailaddress, ok := d.GetOk("emailaddress"); ok { req.EmailAddress = emailaddress.(string) } + if sendAccessEmails, ok := d.GetOk("send_access_emails"); ok { req.SendAccessEmails = sendAccessEmails.(bool) } + + if uniqPools, ok := d.GetOk("uniq_pools"); ok { + uniqPools := uniqPools.([]interface{}) + for _, pool := range uniqPools { + req.UniqPools = append(req.UniqPools, pool.(string)) + } + } + if resLimits, ok := d.GetOk("resource_limits"); ok { - resLimit := resLimits.([]interface{})[0] - resLimitConv := resLimit.(map[string]interface{}) - if resLimitConv["cu_m"] != nil { - maxMemCap := int64(resLimitConv["cu_m"].(float64)) + resLimits := resLimits.([]interface{})[0] + resLimitsConv := resLimits.(map[string]interface{}) + if resLimitsConv["cu_m"] != nil { + maxMemCap := int64(resLimitsConv["cu_m"].(float64)) if maxMemCap == 0 { req.MaxMemoryCapacity = -1 } else { req.MaxMemoryCapacity = maxMemCap } } - if resLimitConv["cu_d"] != nil { - maxDiskCap := int64(resLimitConv["cu_d"].(float64)) + if resLimitsConv["cu_dm"] != nil { + maxDiskCap := int64(resLimitsConv["cu_dm"].(float64)) if maxDiskCap == 0 { 
req.MaxVDiskCapacity = -1 } else { req.MaxVDiskCapacity = maxDiskCap } } - if resLimitConv["cu_c"] != nil { - maxCPUCap := int64(resLimitConv["cu_c"].(float64)) + if resLimitsConv["cu_c"] != nil { + maxCPUCap := int64(resLimitsConv["cu_c"].(float64)) if maxCPUCap == 0 { req.MaxCPUCapacity = -1 } else { req.MaxCPUCapacity = maxCPUCap } - } - if resLimitConv["cu_i"] != nil { - maxNumPublicIP := int64(resLimitConv["cu_i"].(float64)) + if resLimitsConv["cu_i"] != nil { + maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64)) if maxNumPublicIP == 0 { req.MaxNumPublicIP = -1 } else { req.MaxNumPublicIP = maxNumPublicIP } - } - if resLimitConv["cu_np"] != nil { - maxNP := int64(resLimitConv["cu_np"].(float64)) + if resLimitsConv["cu_np"] != nil { + maxNP := int64(resLimitsConv["cu_np"].(float64)) if maxNP == 0 { req.MaxNetworkPeerTransfer = -1 } else { req.MaxNetworkPeerTransfer = maxNP } - } - if resLimitConv["gpu_units"] != nil { - gpuUnits := int64(resLimitConv["gpu_units"].(float64)) + if resLimitsConv["gpu_units"] != nil { + gpuUnits := int64(resLimitsConv["gpu_units"].(float64)) if gpuUnits == 0 { req.GPUUnits = -1 } else { @@ -121,55 +130,72 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf } d.SetId(strconv.FormatUint(accountId, 10)) - d.Set("account_id", accountId) - diagnostics := resourceAccountRead(ctx, d, m) - if diagnostics != nil { - return diagnostics - } + var w dc.Warnings - if enable, ok := d.GetOk("enable"); ok { - enable := enable.(bool) - if enable { - req := account.EnableRequest{ - AccountID: accountId, - } - _, err := c.CloudBroker().Account().Enable(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - req := account.DisableRequest{ - AccountID: accountId, + if users, ok := d.GetOk("users"); ok { + addedUsers := users.([]interface{}) + + for _, user := range addedUsers { + userConv := user.(map[string]interface{}) + + req := account.AddUserRequest{ + AccountID: accountId, + Username: 
userConv["user_id"].(string), + AccessType: userConv["access_type"].(string), } - _, err := c.CloudBroker().Account().Disable(ctx, req) + + _, err := c.CloudBroker().Account().AddUser(ctx, req) if err != nil { - return diag.FromErr(err) + w.Add(err) } } } - if users, ok := d.GetOk("users"); ok { - addedUsers := users.([]interface{}) + if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok { + cpuAllocationParameter := cpuAllocationParameter.(string) - if len(addedUsers) > 0 { - for _, user := range addedUsers { - userConv := user.(map[string]interface{}) - req := account.AddUserRequest{ - AccountID: accountId, - Username: userConv["user_id"].(string), - AccessType: userConv["access_type"].(string), - } + req := account.SetCPUAllocationParameterRequest{ + AccountID: accountId, + StrictLoose: cpuAllocationParameter, + } - _, err := c.CloudBroker().Account().AddUser(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + log.Debugf("setting account cpu allocation parameter") + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req) + if err != nil { + w.Add(err) } } - return nil + if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok { + cpuAllocationRatio := cpuAllocationRatio.(float64) + + req := account.SetCPUAllocationRatioRequest{ + AccountID: accountId, + Ratio: cpuAllocationRatio, + } + + log.Debugf("setting account cpu allocation ratio") + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req) + if err != nil { + w.Add(err) + } + } + + if !d.Get("enable").(bool) { + _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ + AccountID: accountId, + }) + + if err != nil { + w.Add(err) + } + } + + diags := resourceAccountRead(ctx, d, m) + diags = append(diags, w.Get()...) 
+ + return diags } func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -220,35 +246,88 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } + accountId, _ := strconv.ParseUint(d.Id(), 10, 64) + + hasChanged := false + + switch acc.Status { + case status.Destroyed: + d.SetId("") + // return resourceAccountCreate(ctx, d, m) + return diag.Errorf("The resource cannot be updated because it has been destroyed") + case status.Destroying: + return diag.Errorf("The account is in progress with status: %s", acc.Status) + case status.Deleted: + _, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{ + AccountID: accountId, + }) + + if err != nil { + return diag.FromErr(err) + } + + hasChanged = true + case status.Disabled: + log.Debugf("The account is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status) + case status.Confirmed: + } + + if hasChanged { + acc, err = utilityAccountCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + if d.HasChange("enable") { enable := d.Get("enable").(bool) - if enable { - req := account.EnableRequest{ - AccountID: acc.ID, - } - _, err := c.CloudBroker().Account().Enable(ctx, req) + + if enable && acc.Status == status.Disabled { + _, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{ + AccountID: accountId, + }) + if err != nil { return diag.FromErr(err) } - } else { - req := account.DisableRequest{ - AccountID: acc.ID, - } - _, err := c.CloudBroker().Account().Disable(ctx, req) + } else if !enable && acc.Status == status.Enabled { + _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ + AccountID: accountId, + }) + if err != nil { return diag.FromErr(err) } } } - updateReq := account.UpdateRequest{AccountID: acc.ID} - hasChange := false + req := account.UpdateRequest{ + AccountID: accountId, + } + + 
updated := false if d.HasChange("account_name") { - updateReq.Name = d.Get("account_name").(string) + req.Name = d.Get("account_name").(string) + updated = true + } + + if d.HasChange("send_access_emails") { + req.SendAccessEmails = d.Get("send_access_emails").(bool) + updated = true + } + + if d.HasChange("uniq_pools") { + uniq_pools := d.Get("uniq_pools").([]interface{}) + + for _, pool := range uniq_pools { + req.UniqPools = append(req.UniqPools, pool.(string)) + } - hasChange = true + updated = true } + if d.HasChange("resource_limits") { resLimit := d.Get("resource_limits").([]interface{})[0] resLimitConv := resLimit.(map[string]interface{}) @@ -256,81 +335,84 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf if resLimitConv["cu_m"] != nil { maxMemCap := int(resLimitConv["cu_m"].(float64)) if maxMemCap == 0 { - updateReq.MaxMemoryCapacity = -1 + req.MaxMemoryCapacity = -1 } else { - updateReq.MaxMemoryCapacity = int64(maxMemCap) + req.MaxMemoryCapacity = int64(maxMemCap) } } - if resLimitConv["cu_d"] != nil { - maxDiskCap := int(resLimitConv["cu_d"].(float64)) + if resLimitConv["cu_dm"] != nil { + maxDiskCap := int(resLimitConv["cu_dm"].(float64)) if maxDiskCap == 0 { - updateReq.MaxVDiskCapacity = -1 + req.MaxVDiskCapacity = -1 } else { - updateReq.MaxVDiskCapacity = int64(maxDiskCap) + req.MaxVDiskCapacity = int64(maxDiskCap) } } if resLimitConv["cu_c"] != nil { maxCPUCap := int(resLimitConv["cu_c"].(float64)) if maxCPUCap == 0 { - updateReq.MaxCPUCapacity = -1 + req.MaxCPUCapacity = -1 } else { - updateReq.MaxCPUCapacity = int64(maxCPUCap) + req.MaxCPUCapacity = int64(maxCPUCap) } - } if resLimitConv["cu_i"] != nil { maxNumPublicIP := int(resLimitConv["cu_i"].(float64)) if maxNumPublicIP == 0 { - updateReq.MaxNumPublicIP = -1 + req.MaxNumPublicIP = -1 } else { - updateReq.MaxNumPublicIP = int64(maxNumPublicIP) + req.MaxNumPublicIP = int64(maxNumPublicIP) } - } if resLimitConv["cu_np"] != nil { maxNP := 
int(resLimitConv["cu_np"].(float64)) if maxNP == 0 { - updateReq.MaxNetworkPeerTransfer = -1 + req.MaxNetworkPeerTransfer = -1 } else { - updateReq.MaxNetworkPeerTransfer = int64(maxNP) + req.MaxNetworkPeerTransfer = int64(maxNP) } - } if resLimitConv["gpu_units"] != nil { gpuUnits := int(resLimitConv["gpu_units"].(float64)) if gpuUnits == 0 { - updateReq.GPUUnits = -1 + req.GPUUnits = -1 } else { - updateReq.GPUUnits = int64(gpuUnits) + req.GPUUnits = int64(gpuUnits) } } - hasChange = true + updated = true } - if d.HasChange("send_access_emails") { - updateReq.SendAccessEmails = d.Get("send_access_emails").(bool) - hasChange = true + if updated { + _, err := c.CloudBroker().Account().Update(ctx, req) + if err != nil { + return diag.FromErr(err) + } } - if hasChange { - _, err := c.CloudBroker().Account().Update(ctx, updateReq) + if d.HasChange("cpu_allocation_parameter") { + cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) + + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{ + AccountID: acc.ID, + StrictLoose: cpuAllocationParameter, + }) + if err != nil { return diag.FromErr(err) } } - if d.HasChange("restore") { - restore := d.Get("restore").(bool) - if restore { - req := account.RestoreRequest{ - AccountID: acc.ID, - } + if d.HasChange("cpu_allocation_ratio") { + cpuAllocacationRatio := d.Get("cpu_allocation_ratio").(float64) - _, err := c.CloudBroker().Account().Restore(ctx, req) - if err != nil { - return diag.FromErr(err) - } + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{ + AccountID: acc.ID, + Ratio: cpuAllocacationRatio, + }) + if err != nil { + return diag.FromErr(err) } } @@ -347,67 +429,66 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf deletedUsers = append(deletedUsers, el) } } + for _, el := range newConv { if !isContainsUser(oldConv, el) { - addedUsers = append(addedUsers, el) - } 
else { - if isChangedUser(oldConv, el) { + duplicate := false + for _, user := range acc.ACL { + if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) { + duplicate = true + } + } + if !duplicate { + addedUsers = append(addedUsers, el) + } else if isChangedUser(oldConv, el) { updatedUsers = append(updatedUsers, el) } } } + for _, user := range deletedUsers { + userConv := user.(map[string]interface{}) - if len(deletedUsers) > 0 { - for _, user := range deletedUsers { - userConv := user.(map[string]interface{}) - req := account.DeleteUserRequest{ - AccountID: acc.ID, - UserName: userConv["user_id"].(string), - RecursiveDelete: userConv["recursive_delete"].(bool), - } + _, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{ + AccountID: accountId, + UserName: userConv["user_id"].(string), + RecursiveDelete: userConv["recursive_delete"].(bool), + }) - _, err := c.CloudBroker().Account().DeleteUser(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err != nil { + return diag.FromErr(err) } } - if len(addedUsers) > 0 { - for _, user := range addedUsers { - userConv := user.(map[string]interface{}) - req := account.AddUserRequest{ - AccountID: acc.ID, - Username: userConv["user_id"].(string), - AccessType: strings.ToUpper(userConv["access_type"].(string)), - } + for _, user := range addedUsers { + userConv := user.(map[string]interface{}) - _, err := c.CloudBroker().Account().AddUser(ctx, req) - if err != nil { - return diag.FromErr(err) - } + _, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{ + AccountID: accountId, + Username: userConv["user_id"].(string), + AccessType: strings.ToUpper(userConv["access_type"].(string)), + }) + + if err != nil { + return diag.FromErr(err) } } - if len(updatedUsers) > 0 { - for _, user := range updatedUsers { - userConv := user.(map[string]interface{}) - req := account.UpdateUserRequest{ - AccountID: acc.ID, - UserID: userConv["user_id"].(string), - AccessType: 
strings.ToUpper(userConv["access_type"].(string)), - } + for _, user := range updatedUsers { + userConv := user.(map[string]interface{}) - _, err := c.CloudBroker().Account().UpdateUser(ctx, req) - if err != nil { - return diag.FromErr(err) - } + _, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{ + AccountID: accountId, + UserID: userConv["user_id"].(string), + AccessType: strings.ToUpper(userConv["access_type"].(string)), + }) + + if err != nil { + return diag.FromErr(err) } } - } - return nil + return resourceAccountRead(ctx, d, m) } func isContainsUser(els []interface{}, el interface{}) bool { diff --git a/internal/service/cloudbroker/account/utility_account.go b/internal/service/cloudbroker/account/utility_account.go index 7a70f04..94c6e7d 100644 --- a/internal/service/cloudbroker/account/utility_account.go +++ b/internal/service/cloudbroker/account/utility_account.go @@ -44,13 +44,15 @@ import ( func utilityAccountCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*account.RecordAccount, error) { c := m.(*controller.ControllerCfg) + + accountID := d.Get("account_id").(int) + req := account.GetRequest{} - if (strconv.Itoa(d.Get("account_id").(int))) != "0" { - req.AccountID = uint64(d.Get("account_id").(int)) + if d.Id() == "" { + req.AccountID = uint64(accountID) } else { - id, _ := strconv.ParseUint(d.Id(), 10, 64) - req.AccountID = id + req.AccountID, _ = strconv.ParseUint(d.Id(), 10, 64) } log.Debugf("utilityAccountCheckPresence: load account") diff --git a/internal/service/cloudbroker/account/utility_account_computes_list.go b/internal/service/cloudbroker/account/utility_account_computes_list.go index 1cb3cab..12b7cd7 100644 --- a/internal/service/cloudbroker/account/utility_account_computes_list.go +++ b/internal/service/cloudbroker/account/utility_account_computes_list.go @@ -47,6 +47,46 @@ func utilityAccountComputesListCheckPresence(ctx context.Context, d *schema.Reso AccountID: 
uint64(d.Get("account_id").(int)), } + if compute_id, ok := d.GetOk("compute_id"); ok { + req.ComputeID = uint64(compute_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if rg_name, ok := d.GetOk("rg_name"); ok { + req.RGName = rg_name.(string) + } + + if rg_id, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rg_id.(int)) + } + + if tech_status, ok := d.GetOk("tech_status"); ok { + req.TechStatus = tech_status.(string) + } + + if ip_address, ok := d.GetOk("ip_address"); ok { + req.IPAddress = ip_address.(string) + } + + if extnet_name, ok := d.GetOk("extnet_name"); ok { + req.ExtNetName = extnet_name.(string) + } + + if extnet_id, ok := d.GetOk("extnet_id"); ok { + req.ExtNetID = uint64(extnet_id.(int)) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + log.Debugf("utilityAccountComputesListCheckPresence: load account list") accountComputesList, err := c.CloudBroker().Account().ListComputes(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/account/utility_account_deleted_list.go b/internal/service/cloudbroker/account/utility_account_deleted_list.go index 62dcbce..500c275 100644 --- a/internal/service/cloudbroker/account/utility_account_deleted_list.go +++ b/internal/service/cloudbroker/account/utility_account_deleted_list.go @@ -48,10 +48,23 @@ func utilityAccountDeletedListCheckPresence(ctx context.Context, d *schema.Resou if page, ok := d.GetOk("page"); ok { req.Page = uint64(page.(int)) } + if size, ok := d.GetOk("size"); ok { req.Size = uint64(size.(int)) } + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + + if acl, ok := d.GetOk("acl"); ok { + req.ACL = acl.(string) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + log.Debugf("utilityAccountDeletedListCheckPresence: load") accountDeletedList, err := 
c.CloudBroker().Account().ListDeleted(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/account/utility_account_disks_list.go b/internal/service/cloudbroker/account/utility_account_disks_list.go index 6e243dd..c28b083 100644 --- a/internal/service/cloudbroker/account/utility_account_disks_list.go +++ b/internal/service/cloudbroker/account/utility_account_disks_list.go @@ -47,6 +47,30 @@ func utilityAccountDisksListCheckPresence(ctx context.Context, d *schema.Resourc AccountID: uint64(d.Get("account_id").(int)), } + if disk_id, ok := d.GetOk("disk_id"); ok { + req.DiskID = uint64(disk_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if disk_max_size, ok := d.GetOk("disk_max_size"); ok { + req.DiskMaxSize = uint64(disk_max_size.(int)) + } + + if typeVal, ok := d.GetOk("type"); ok { + req.Type = typeVal.(string) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + log.Debugf("utilityAccountDisksListCheckPresence: load account list") accountDisksList, err := c.CloudBroker().Account().ListDisks(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/account/utility_account_flip_groups.go b/internal/service/cloudbroker/account/utility_account_flip_groups.go index 186cdb1..c3f9d04 100644 --- a/internal/service/cloudbroker/account/utility_account_flip_groups.go +++ b/internal/service/cloudbroker/account/utility_account_flip_groups.go @@ -47,6 +47,38 @@ func utilityAccountFlipGroupsListCheckPresence(ctx context.Context, d *schema.Re AccountID: uint64(d.Get("account_id").(int)), } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if vins_id, ok := d.GetOk("vins_id"); ok { + req.VINSID = uint64(vins_id.(int)) + } + + if vins_name, ok := d.GetOk("vins_name"); ok { + req.VINSName = vins_name.(string) + } + + if extnet_id, ok := d.GetOk("extnet_id"); ok { + req.ExtNetID = 
uint64(extnet_id.(int)) + } + + if by_ip, ok := d.GetOk("by_ip"); ok { + req.ByIP = by_ip.(string) + } + + if flipgroup_id, ok := d.GetOk("flipgroup_id"); ok { + req.FLIPGroupID = uint64(flipgroup_id.(int)) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + log.Debugf("utilityAccountFlipGroupsListCheckPresence") accountFlipGroupsList, err := c.CloudBroker().Account().ListFLIPGroups(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/account/utility_account_get_resource_consumption.go b/internal/service/cloudbroker/account/utility_account_get_resource_consumption.go new file mode 100644 index 0000000..60a8bf2 --- /dev/null +++ b/internal/service/cloudbroker/account/utility_account_get_resource_consumption.go @@ -0,0 +1,61 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityAccountResourceConsumptionGetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*account.RecordResourceConsumption, error) { + c := m.(*controller.ControllerCfg) + + id := uint64(d.Get("account_id").(int)) + + req:= account.GetResourceConsumptionRequest { + AccountID: id, + } + + log.Debugf("utilityAccountResourceConsumptionGetCheckPresence: load") + accountResourceConsumptionRec, err := c.CloudBroker().Account().GetResourceConsumption(ctx, req) + if err != nil { + return nil, err + } + + return accountResourceConsumptionRec, nil +} diff --git a/internal/service/cloudbroker/account/utility_account_list.go b/internal/service/cloudbroker/account/utility_account_list.go index a9d35e3..ab26381 100644 --- a/internal/service/cloudbroker/account/utility_account_list.go +++ b/internal/service/cloudbroker/account/utility_account_list.go @@ -52,6 +52,22 @@ func utilityAccountListCheckPresence(ctx context.Context, d *schema.ResourceData req.Size = uint64(size.(int)) } + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if acl, ok := d.GetOk("acl"); ok { + req.ACL = acl.(string) + } + + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + log.Debugf("utilityAccountListCheckPresence: load account list") accountList, err := c.CloudBroker().Account().List(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/rg/api.go b/internal/service/cloudbroker/account/utility_account_resource_consumption_list.go similarity 
index 58% rename from internal/service/cloudbroker/rg/api.go rename to internal/service/cloudbroker/account/utility_account_resource_consumption_list.go index 6248810..feb995e 100644 --- a/internal/service/cloudbroker/rg/api.go +++ b/internal/service/cloudbroker/account/utility_account_resource_consumption_list.go @@ -1,39 +1,53 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package rg - -const ResgroupCreateAPI = "/restmachine/cloudbroker/rg/create" -const ResgroupUpdateAPI = "/restmachine/cloudbroker/rg/update" -const ResgroupListAPI = "/restmachine/cloudbroker/rg/list" -const ResgroupGetAPI = "/restmachine/cloudbroker/rg/get" -const ResgroupDeleteAPI = "/restmachine/cloudbroker/rg/delete" -const RgListComputesAPI = "/restmachine/cloudbroker/rg/listComputes" +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityAccountResourceConsumptionListCheckPresence(ctx context.Context, m interface{}) (*account.ListResources, error) { + c := m.(*controller.ControllerCfg) + + log.Debugf("utilityAccountResourceConsumptionListCheckPresence: load") + accountResourceConsumptionList, err := c.CloudBroker().Account().ListResourceConsumption(ctx) + if err != nil { + return nil, err + } + + return accountResourceConsumptionList, nil +} diff --git a/internal/service/cloudbroker/account/utility_account_rg_list.go b/internal/service/cloudbroker/account/utility_account_rg_list.go index efbedee..da55500 100644 --- a/internal/service/cloudbroker/account/utility_account_rg_list.go +++ b/internal/service/cloudbroker/account/utility_account_rg_list.go @@ -47,6 
+47,34 @@ func utilityAccountRGListCheckPresence(ctx context.Context, d *schema.ResourceDa AccountID: uint64(d.Get("account_id").(int)), } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + if rg_id, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rg_id.(int)) + } + + if vins_id, ok := d.GetOk("vins_id"); ok { + req.VINSID = uint64(vins_id.(int)) + } + + if vm_id, ok := d.GetOk("vm_id"); ok { + req.VMID = uint64(vm_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + log.Debugf("utilityAccountRGListCheckPresence: load account list") accountRGList, err := c.CloudBroker().Account().ListRG(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/account/utility_account_vins_list.go b/internal/service/cloudbroker/account/utility_account_vins_list.go index abf17d8..a995725 100644 --- a/internal/service/cloudbroker/account/utility_account_vins_list.go +++ b/internal/service/cloudbroker/account/utility_account_vins_list.go @@ -47,6 +47,30 @@ func utilityAccountVinsListCheckPresence(ctx context.Context, d *schema.Resource AccountID: uint64(d.Get("account_id").(int)), } + if vins_id, ok := d.GetOk("vins_id"); ok { + req.VINSID = uint64(vins_id.(int)) + } + + if rg_id, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rg_id.(int)) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if ext_ip, ok := d.GetOk("ext_ip"); ok { + req.ExtIP = ext_ip.(string) + } + log.Debugf("utilityAccountVinsListCheckPresence: load account list") accountVinsList, err := c.CloudBroker().Account().ListVINS(ctx, req) if err != nil { diff --git 
a/internal/service/cloudbroker/disks/data_source_disk.go b/internal/service/cloudbroker/disks/data_source_disk.go index 8bf6fdd..5b7028a 100644 --- a/internal/service/cloudbroker/disks/data_source_disk.go +++ b/internal/service/cloudbroker/disks/data_source_disk.go @@ -79,6 +79,22 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeString, + Computed: true, + }, + "compute_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "created_time": { Type: schema.TypeInt, Computed: true, @@ -222,6 +238,13 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "present_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "purge_attempts": { Type: schema.TypeInt, Computed: true, @@ -258,6 +281,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "shareable": { + Type: schema.TypeString, + Computed: true, + }, "size_max": { Type: schema.TypeInt, Computed: true, @@ -279,6 +306,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, "res_id": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/disks/data_source_disk_list.go b/internal/service/cloudbroker/disks/data_source_disk_list.go index 5abcc6e..b010930 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_list.go +++ b/internal/service/cloudbroker/disks/data_source_disk_list.go @@ -49,12 +49,43 @@ func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m inter id := uuid.New() d.SetId(id.String()) d.Set("items", flattenDiskList(diskList)) + 
d.Set("entry_count", diskList.EntryCount) return nil } func dataSourceDiskListSchemaMake() map[string]*schema.Schema { res := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Find by ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Find by name", + }, + "account_name": { + Type: schema.TypeString, + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": { + Type: schema.TypeInt, + Optional: true, + Description: "Find by max disk size", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Find by status", + }, + "shared": { + Type: schema.TypeBool, + Optional: true, + Description: "Find by shared field", + }, "account_id": { Type: schema.TypeInt, Optional: true, @@ -65,6 +96,16 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Optional: true, Description: "type of the disks", }, + "sep_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Find by sep id", + }, + "pool": { + Type: schema.TypeString, + Optional: true, + Description: "Find by pool name", + }, "page": { Type: schema.TypeInt, Optional: true, @@ -96,6 +137,22 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeString, + Computed: true, + }, + "compute_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "created_time": { Type: schema.TypeInt, Computed: true, @@ -251,6 +308,13 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "present_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "purge_attempts": { Type: schema.TypeInt, Computed: true, @@ -287,6 +351,10 @@ func dataSourceDiskListSchemaMake() 
map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, "size_max": { Type: schema.TypeInt, Computed: true, @@ -308,6 +376,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, "res_id": { Type: schema.TypeString, Computed: true, @@ -346,6 +418,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { }, }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, } return res } diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_types.go b/internal/service/cloudbroker/disks/data_source_disk_list_types.go new file mode 100644 index 0000000..dfabdae --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_disk_list_types.go @@ -0,0 +1,56 @@ +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", listTypes.Data) + d.Set("entry_count", listTypes.EntryCount) + + return nil +} + +func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func DataSourceDiskListTypes() 
*schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + ReadContext: dataSourceDiskListTypesRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListTypesSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go new file mode 100644 index 0000000..c3c43d5 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go @@ -0,0 +1,84 @@ +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskListTypesDetailed(listTypesDetailed)) + d.Set("entry_count", listTypesDetailed.EntryCount) + return nil +} + +func DataSourceDiskListTypesDetailed() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + ReadContext: dataSourceDiskListTypesDetailedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Pool name", + }, + "system": { + Type: schema.TypeString, + Computed: true, + }, + 
"types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + }, + }, + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", + }, + "sep_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go b/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go new file mode 100644 index 0000000..d3cad73 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go @@ -0,0 +1,470 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Nikita Sorokin, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskListUnattached(diskListUnattached)) + d.Set("entry_count", diskListUnattached.EntryCount) + + return nil +} + +func DataSourceDiskListUnattached() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskListUnattachedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListUnattachedSchemaMake(), + } +} + +func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Find by ID", + }, + "account_name": { + Type: schema.TypeString, + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": { + Type: schema.TypeInt, + Optional: true, + Description: "Find by max disk size", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Find by status", + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: "type of the disks", + }, + "account_id": { + Type: schema.TypeInt, + Optional: true, + Description: "ID of the account the disks belong to", + }, + "sep_id": { + Type: schema.TypeInt, + Optional: true, + Description: "ID of SEP", + }, + "pool": { + Type: schema.TypeString, + Optional: true, + }, 
+ "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "_ckey": { + Type: schema.TypeString, + Computed: true, + Description: "CKey", + }, + "_meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Meta parameters", + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the account the disks belong to", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the subscriber '(account') to whom this disk belongs", + }, + "acl": { + Type: schema.TypeString, + Computed: true, + }, + "boot_partition": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of disk partitions", + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Created time", + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Deleted time", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Description: "Description of disk", + }, + "destruction_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Time of final deletion", + }, + "disk_path": { + Type: schema.TypeString, + Computed: true, + Description: "Disk path", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the grid (platform)", + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID on the storage side", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Image ID", + }, + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + Description: "IDs of images using the disk", + }, + "iotune": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to read per second", + }, + "read_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to read", + }, + "read_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of io read operations per second", + }, + "read_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of io read operations", + }, + "size_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Size of io operations", + }, + "total_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Total size bytes per second", + }, + "total_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total size of bytes per second", + }, + "total_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Total number of io operations per second", + }, + "total_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total number of io operations per second", + }, + "write_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to write per second", + }, + "write_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to write per second", + }, + "write_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of write operations per second", + }, + "write_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of write operations per second", + }, + }, + }, + }, + "iqn": { + Type: schema.TypeString, + Computed: true, + Description: "Disk IQN", + }, + "login": { + Type: schema.TypeString, + Computed: true, 
+ Description: "Login to access the disk", + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + Description: "Milestones", + }, + "disk_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of disk", + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk order", + }, + "params": { + Type: schema.TypeString, + Computed: true, + Description: "Disk params", + }, + "parent_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the parent disk", + }, + "passwd": { + Type: schema.TypeString, + Computed: true, + Description: "Password to access the disk", + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the pci slot to which the disk is connected", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Description: "Pool for disk location", + }, + "present_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "purge_attempts": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of deletion attempts", + }, + "purge_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Time of the last deletion attempt", + }, + "reality_device_number": { + Type: schema.TypeInt, + Computed: true, + Description: "Reality device number", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the reference to the disk", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Resource ID", + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource", + }, + "role": { + Type: schema.TypeString, + Computed: true, + Description: "Disk role", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + 
Description: "Size in GB", + }, + "size_used": { + Type: schema.TypeFloat, + Computed: true, + Description: "Number of used space, in GB", + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "label": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Disk status", + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + Description: "Technical status of the disk", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + "vmid": { + Type: schema.TypeInt, + Computed: true, + Description: "Virtual Machine ID (Deprecated)", + }, + }, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} diff --git a/internal/service/cloudbroker/disks/data_source_disk_snapshot.go b/internal/service/cloudbroker/disks/data_source_disk_snapshot.go new file mode 100644 index 0000000..daa7923 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_disk_snapshot.go @@ -0,0 +1,129 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + var snapshot disks.ItemSnapshot + label := d.Get("label").(string) + for _, sn := range disk.Snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + id := uuid.New() + d.SetId(id.String()) + + flattenDiskSnapshot(d, snapshot) + + return nil +} + +func DataSourceDiskSnapshot() *schema.Resource { + return 
&schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskSnapshotRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskSnapshotSchemaMake(), + } +} + +func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "label": { + Type: schema.TypeString, + Required: true, + Description: "Name of the snapshot", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + } + return rets +} diff --git a/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go b/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go new file mode 100644 index 0000000..e9471e7 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go @@ -0,0 +1,122 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskSnapshotList(disk.Snapshots)) + return nil +} + +func DataSourceDiskSnapshotList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskSnapshotListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskSnapshotListSchemaMake(), + } +} + +func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "items": { + 
Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + }, + }, + }, + } + return rets +} diff --git a/internal/service/cloudbroker/disks/flattens.go b/internal/service/cloudbroker/disks/flattens.go index ba66670..291edcf 100644 --- a/internal/service/cloudbroker/disks/flattens.go +++ b/internal/service/cloudbroker/disks/flattens.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) { @@ -14,6 +15,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) { d.Set("account_name", disk.AccountName) d.Set("acl", string(diskAcl)) d.Set("boot_partition", disk.BootPartition) + d.Set("computes", flattenDiskComputes(disk.Computes)) d.Set("created_time", disk.CreatedTime) d.Set("deleted_time", disk.DeletedTime) d.Set("desc", disk.Description) @@ -37,6 +39,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) { d.Set("pci_slot", disk.PCISlot) d.Set("pool", disk.Pool) d.Set("purge_attempts", disk.PurgeAttempts) + d.Set("present_to", disk.PresentTo) d.Set("purge_time", 
disk.PurgeTime) d.Set("reality_device_number", disk.RealityDeviceNumber) d.Set("reference_id", disk.ReferenceID) @@ -45,6 +48,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) { d.Set("role", disk.Role) d.Set("sep_id", disk.SEPID) d.Set("sep_type", disk.SEPType) + d.Set("shareable", disk.Shareable) d.Set("size_max", disk.SizeMax) d.Set("size_used", disk.SizeUsed) d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots)) @@ -54,6 +58,27 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) { d.Set("vmid", disk.VMID) } +func flattenDiskSnapshot(d *schema.ResourceData, snapshot disks.ItemSnapshot) { + d.Set("timestamp", snapshot.Timestamp) + d.Set("guid", snapshot.GUID) + d.Set("reference_id", snapshot.ReferenceID) + d.Set("res_id", snapshot.ResID) + d.Set("snap_set_guid", snapshot.SnapSetGUID) + d.Set("snap_set_time", snapshot.SnapSetTime) +} + +func flattenDiskComputes(computes map[string]string) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computes)) + for key, val := range computes { + tmp := map[string]interface{}{ + "compute_id": key, + "compute_name": val, + } + res = append(res, tmp) + } + return res +} + func flattenIOTune(iot disks.IOTune) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ @@ -140,6 +165,7 @@ func flattendDiskSnapshotList(sl disks.ListSnapshots) []interface{} { temp := map[string]interface{}{ "guid": snapshot.GUID, "label": snapshot.Label, + "reference_id": snapshot.ReferenceID, "res_id": snapshot.ResID, "snap_set_guid": snapshot.SnapSetGUID, "snap_set_time": snapshot.SnapSetTime, @@ -151,3 +177,105 @@ func flattendDiskSnapshotList(sl disks.ListSnapshots) []interface{} { return res } + +func flattenDiskListTypesDetailed(tld *disks.ListTypes) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, typeListDetailed := range tld.Data { + toMap := typeListDetailed.(map[string]interface{}) + temp := 
map[string]interface{}{ + "pools": flattenListTypesDetailedPools(toMap["pools"].([]interface{})), + "sep_id": toMap["sepId"].(float64), + "sep_name": toMap["sepName"].(string), + } + res = append(res, temp) + } + return res +} + +func flattenListTypesDetailedPools(pools []interface{}) []interface{} { + res := make([]interface{}, 0) + for _, pool := range pools { + toMap := pool.(map[string]interface{}) + temp := map[string]interface{}{ + "name": toMap["name"].(string), + "system": toMap["system"].(string), + "types": toMap["types"].([]interface{}), + } + res = append(res, temp) + } + + return res +} + +func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, unattachedDisk := range ul.Data { + unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL) + tmp := map[string]interface{}{ + "_ckey": unattachedDisk.CKey, + "_meta": flattens.FlattenMeta(unattachedDisk.Meta), + "account_id": unattachedDisk.AccountID, + "account_name": unattachedDisk.AccountName, + "acl": string(unattachedDiskAcl), + "boot_partition": unattachedDisk.BootPartition, + "created_time": unattachedDisk.CreatedTime, + "deleted_time": unattachedDisk.DeletedTime, + "desc": unattachedDisk.Description, + "destruction_time": unattachedDisk.DestructionTime, + "disk_path": unattachedDisk.DiskPath, + "gid": unattachedDisk.GID, + "guid": unattachedDisk.GUID, + "disk_id": unattachedDisk.ID, + "image_id": unattachedDisk.ImageID, + "images": unattachedDisk.Images, + "iotune": flattenIOTune(unattachedDisk.IOTune), + "iqn": unattachedDisk.IQN, + "login": unattachedDisk.Login, + "milestones": unattachedDisk.Milestones, + "disk_name": unattachedDisk.Name, + "order": unattachedDisk.Order, + "params": unattachedDisk.Params, + "parent_id": unattachedDisk.ParentID, + "passwd": unattachedDisk.Password, + "pci_slot": unattachedDisk.PCISlot, + "pool": unattachedDisk.Pool, + "present_to": unattachedDisk.PresentTo, + "purge_attempts": 
unattachedDisk.PurgeAttempts, + "purge_time": unattachedDisk.PurgeTime, + "reality_device_number": unattachedDisk.RealityDeviceNumber, + "reference_id": unattachedDisk.ReferenceID, + "res_id": unattachedDisk.ResID, + "res_name": unattachedDisk.ResName, + "role": unattachedDisk.Role, + "sep_id": unattachedDisk.SEPID, + "shareable": unattachedDisk.Shareable, + "size_max": unattachedDisk.SizeMax, + "size_used": unattachedDisk.SizeUsed, + "snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots), + "status": unattachedDisk.Status, + "tech_status": unattachedDisk.TechStatus, + "type": unattachedDisk.Type, + "vmid": unattachedDisk.VMID, + } + res = append(res, tmp) + } + return res +} + +func flattenDiskSnapshotList(sl disks.ListSnapshots) []interface{} { + res := make([]interface{}, 0) + for _, snapshot := range sl { + temp := map[string]interface{}{ + "guid": snapshot.GUID, + "label": snapshot.Label, + "reference_id": snapshot.ReferenceID, + "res_id": snapshot.ResID, + "snap_set_guid": snapshot.SnapSetGUID, + "snap_set_time": snapshot.SnapSetTime, + "timestamp": snapshot.Timestamp, + } + res = append(res, temp) + } + + return res +} diff --git a/internal/service/cloudbroker/disks/resource_check_input_values.go b/internal/service/cloudbroker/disks/resource_check_input_values.go new file mode 100644 index 0000000..a27b70b --- /dev/null +++ b/internal/service/cloudbroker/disks/resource_check_input_values.go @@ -0,0 +1,55 @@ +package disks + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + accountID := uint64(d.Get("account_id").(int)) + + accountList, err := 
c.CloudBroker().Account().List(ctx, account.ListRequest{}) + if err != nil { + return err + } + + if len(accountList.FilterByID(accountID).Data) == 0 { + return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID) + } + + return nil +} + +func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + gid := uint64(d.Get("gid").(int)) + + gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{}) + if err != nil { + return err + } + + for _, elem := range gidList.Data { + if elem.GID == gid { + return nil + } + } + + return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid) +} + +func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error { + err := existAccountID(ctx, d, m) + if err != nil { + return err + } + + return existGID(ctx, d, m) +} diff --git a/internal/service/cloudbroker/disks/resource_disk.go b/internal/service/cloudbroker/disks/resource_disk.go index 0c1027e..eb5ad31 100644 --- a/internal/service/cloudbroker/disks/resource_disk.go +++ b/internal/service/cloudbroker/disks/resource_disk.go @@ -35,129 +35,233 @@ import ( "context" "fmt" "strconv" - "strings" - log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + log "github.com/sirupsen/logrus" ) func resourceDiskCreate(ctx context.Context, d 
*schema.ResourceData, m interface{}) diag.Diagnostics { c := m.(*controller.ControllerCfg) - req := disks.CreateRequest{} - req.AccountID = uint64(d.Get("account_id").(int)) - req.GID = uint64(d.Get("gid").(int)) - req.Name = d.Get("disk_name").(string) - req.Size = uint64(d.Get("size_max").(int)) + err := checkParamsExists(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } - if typeRaw, ok := d.GetOk("type"); ok { - req.Type = strings.ToUpper(typeRaw.(string)) - } else { - req.Type = "D" + req := disks.CreateRequest{ + AccountID: uint64(d.Get("account_id").(int)), + GID: uint64(d.Get("gid").(int)), + Name: d.Get("disk_name").(string), + Size: uint64(d.Get("size_max").(int)), + Type: d.Get("type").(string), } - if sepId, ok := d.GetOk("sep_id"); ok { - req.SEPID = uint64(sepId.(int)) + if desc, ok := d.GetOk("desc"); ok { + req.Description = desc.(string) } - if poolName, ok := d.GetOk("pool"); ok { - req.Pool = poolName.(string) + if ssdSize, ok := d.GetOk("ssd_size"); ok { + req.SSDSize = uint64(ssdSize.(int)) } - argVal, argSet := d.GetOk("desc") - if argSet { - req.Description = argVal.(string) + if sepID, ok := d.GetOk("sep_id"); ok { + req.SEPID = uint64(sepID.(int)) } - diskId, err := c.CloudBroker().Disks().Create(ctx, req) + if pool, ok := d.GetOk("pool"); ok { + req.Pool = pool.(string) + } + + diskID, err := c.CloudBroker().Disks().Create(ctx, req) if err != nil { return diag.FromErr(err) } - d.SetId(strconv.FormatUint(diskId, 10)) + d.SetId(strconv.FormatUint(diskID, 10)) + + w := dc.Warnings{} if iotuneRaw, ok := d.GetOk("iotune"); ok { iot := iotuneRaw.([]interface{})[0] iotune := iot.(map[string]interface{}) req := disks.LimitIORequest{ - DiskID: diskId, + DiskID: diskID, IOPS: uint64(iotune["total_iops_sec"].(int)), - TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)), - WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), - TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), + 
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)), - WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), + ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), + SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), + TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)), - ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), - WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)), - ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), + TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), + WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), + WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), + WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)), - SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), } _, err := c.CloudBroker().Disks().LimitIO(ctx, req) if err != nil { - return diag.FromErr(err) + w.Add(err) } } - return resourceDiskRead(ctx, d, m) + if shareable := d.Get("shareable"); shareable.(bool) { + _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{ + DiskID: diskID, + }) + if err != nil { + w.Add(err) + } + } + + return append(w.Get(), resourceDiskRead(ctx, d, m)...) 
} func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + w := dc.Warnings{} + disk, err := utilityDiskCheckPresence(ctx, d, m) if err != nil { d.SetId("") return diag.FromErr(err) } + hasChangeState := false + + switch disk.Status { + case status.Destroyed, status.Purged: + d.Set("disk_id", 0) + d.SetId("") + return resourceDiskCreate(ctx, d, m) + case status.Deleted: + hasChangeState = true + req := disks.RestoreRequest{ + DiskID: disk.ID, + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } else { + req.Reason = "Terraform automatic restore" + } + + _, err := c.CloudBroker().Disks().Restore(ctx, req) + if err != nil { + w.Add(err) + } + case status.Assigned: + case status.Modeled: + return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status) + case status.Creating: + case status.Created: + case status.Allocated: + case status.Unallocated: + } + + if hasChangeState { + disk, err = utilityDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + flattenDisk(d, disk) - return nil + return w.Get() } func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { c := m.(*controller.ControllerCfg) - diskID, _ := strconv.ParseUint(d.Id(), 10, 64) + w := dc.Warnings{} + + err := checkParamsExists(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + hasChangeState := false + + switch disk.Status { + case status.Destroyed, status.Purged: + d.Set("disk_id", 0) + d.SetId("") + return resourceDiskCreate(ctx, d, m) + case status.Deleted: + hasChangeState = true + req := disks.RestoreRequest{ + DiskID: disk.ID, + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } else { + req.Reason = "Terraform 
automatic restore" + } + + _, err := c.CloudBroker().Disks().Restore(ctx, req) + if err != nil { + return diag.FromErr(err) + } + case status.Assigned: + case status.Modeled: + return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status) + case status.Creating: + case status.Created: + case status.Allocated: + case status.Unallocated: + } + + if hasChangeState { + disk, err = utilityDiskCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } if d.HasChange("size_max") { oldSize, newSize := d.GetChange("size_max") if oldSize.(int) < newSize.(int) { log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB", d.Id(), oldSize.(int), newSize.(int)) - req := disks.ResizeRequest{ - DiskID: diskID, - Size: uint64(newSize.(int)), - } - _, err := c.CloudBroker().Disks().Resize(ctx, req) + _, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{ + DiskID: disk.ID, + Size: uint64(newSize.(int)), + }) if err != nil { - return diag.FromErr(err) + w.Add(err) } - - d.Set("size_max", newSize) } else if oldSize.(int) > newSize.(int) { return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id())) } } if d.HasChange("disk_name") { - req := disks.RenameRequest{ - DiskID: diskID, + _, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{ + DiskID: disk.ID, Name: d.Get("disk_name").(string), - } - - _, err := c.CloudBroker().Disks().Rename(ctx, req) + }) if err != nil { - return diag.FromErr(err) + w.Add(err) } } @@ -165,44 +269,49 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface iot := d.Get("iotune").([]interface{})[0] iotune := iot.(map[string]interface{}) req := disks.LimitIORequest{ - DiskID: diskID, IOPS: uint64(iotune["total_iops_sec"].(int)), - TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)), - WriteBytesSec: 
uint64(iotune["write_bytes_sec"].(int)), - TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), + ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)), - WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), + ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), + SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), + TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)), - ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), - WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)), - ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), + TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), + WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), + WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), + WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)), - SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), } _, err := c.CloudBroker().Disks().LimitIO(ctx, req) if err != nil { - return diag.FromErr(err) + w.Add(err) } } - if d.HasChange("restore") { - if d.Get("restore").(bool) { - req := disks.RestoreRequest{ - DiskID: diskID, - Reason: d.Get("reason").(string), + if d.HasChange("shareable") { + old, new := d.GetChange("shareable") + if !old.(bool) && new.(bool) && !disk.Shareable { + _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{ + DiskID: disk.ID, + }) + if err != nil { + w.Add(err) } - - _, err := c.CloudBroker().Disks().Restore(ctx, req) + } + if old.(bool) && !new.(bool) && disk.Shareable { + _, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{ + DiskID: disk.ID, + }) if err != nil { - return diag.FromErr(err) + w.Add(err) } } } - return resourceDiskRead(ctx, d, m) + return append(w.Get(), resourceDiskRead(ctx, d, m)...) 
} func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -210,6 +319,7 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface disk, err := utilityDiskCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -225,6 +335,8 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface return diag.FromErr(err) } + d.SetId("") + return nil } @@ -233,41 +345,46 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { "account_id": { Type: schema.TypeInt, Required: true, + ForceNew: true, + }, + "gid": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, }, "disk_name": { Type: schema.TypeString, Required: true, }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + "desc": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "size_max": { Type: schema.TypeInt, Required: true, }, - "gid": { + "ssd_size": { Type: schema.TypeInt, - Required: true, - }, - "pool": { - Type: schema.TypeString, Optional: true, - Computed: true, }, "sep_id": { Type: schema.TypeInt, Optional: true, Computed: true, }, - "desc": { + "pool": { Type: schema.TypeString, Optional: true, Computed: true, }, - "type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), - }, - "detach": { Type: schema.TypeBool, Optional: true, @@ -286,17 +403,17 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Default: "", Description: "reason for an action", }, + "shareable": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, "restore": { Type: schema.TypeBool, Optional: true, Default: false, Description: "restore deleting disk", }, - - "disk_id": { - Type: schema.TypeInt, - 
Computed: true, - }, "account_name": { Type: schema.TypeString, Computed: true, @@ -309,6 +426,22 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeString, + Computed: true, + }, + "compute_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "created_time": { Type: schema.TypeInt, Computed: true, @@ -333,6 +466,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + }, "image_id": { Type: schema.TypeInt, Computed: true, @@ -452,7 +589,13 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, - + "present_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "purge_attempts": { Type: schema.TypeInt, Computed: true, @@ -481,7 +624,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, - "sep_type": { Type: schema.TypeString, Computed: true, @@ -503,6 +645,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, "res_id": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/disks/resource_disk_snapshot.go b/internal/service/cloudbroker/disks/resource_disk_snapshot.go new file mode 100644 index 0000000..c550d88 --- /dev/null +++ b/internal/service/cloudbroker/disks/resource_disk_snapshot.go @@ -0,0 +1,215 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + snapshots := disk.Snapshots + snapshot := disks.ItemSnapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + if rollback := d.Get("rollback").(bool); rollback { + req := disks.SnapshotRollbackRequest{ + DiskID: disk.ID, + Label: label, + TimeStamp: uint64(d.Get("timestamp").(int)), + } + + log.Debugf("resourceDiskCreate: Snapshot rollback with label", label) + _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + return resourceDiskSnapshotRead(ctx, d, m) +} + +func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + snapshots := disk.Snapshots + snapshot := disks.ItemSnapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + flattenDiskSnapshot(d, snapshot) + + return nil +} + +func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + 
d.SetId("") + return diag.FromErr(err) + } + + snapshots := disk.Snapshots + snapshot := disks.ItemSnapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + if d.HasChange("rollback") && d.Get("rollback").(bool) == true { + req := disks.SnapshotRollbackRequest{ + DiskID: disk.ID, + Label: label, + TimeStamp: uint64(d.Get("timestamp").(int)), + } + + log.Debugf("resourceDiskSnapshotUpdate: Snapshot rollback with label %v", label) + _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + return resourceDiskSnapshotRead(ctx, d, m) +} + +func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + + disk, err := utilityDiskCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + req := disks.SnapshotDeleteRequest{ + DiskID: disk.ID, + Label: d.Get("label").(string), + } + + _, err = c.CloudBroker().Disks().SnapshotDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func ResourceDiskSnapshot() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceDiskSnapshotCreate, + ReadContext: resourceDiskSnapshotRead, + UpdateContext: resourceDiskSnapshotUpdate, + DeleteContext: resourceDiskSnapshotDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: resourceDiskSnapshotSchemaMake(), + } +} + +func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema { + 
return map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "label": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the snapshot", + }, + "rollback": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Needed in order to make a snapshot rollback", + }, + "timestamp": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Snapshot time", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + } +} diff --git a/internal/service/cloudbroker/disks/utility_disk.go b/internal/service/cloudbroker/disks/utility_disk.go index cbbfb37..842b883 100644 --- a/internal/service/cloudbroker/disks/utility_disk.go +++ b/internal/service/cloudbroker/disks/utility_disk.go @@ -46,9 +46,9 @@ func utilityDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m int c := m.(*controller.ControllerCfg) req := disks.GetRequest{} - if d.Get("disk_id").(int) == 0 { - id, _ := strconv.ParseUint(d.Id(), 10, 64) - req.DiskID = id + if d.Id() != "" { + diskID, _ := strconv.ParseUint(d.Id(), 10, 64) + req.DiskID = diskID } else { req.DiskID = uint64(d.Get("disk_id").(int)) } diff --git a/internal/service/cloudbroker/disks/utility_disk_list.go b/internal/service/cloudbroker/disks/utility_disk_list.go index a5e4742..3320680 100644 --- a/internal/service/cloudbroker/disks/utility_disk_list.go +++ 
b/internal/service/cloudbroker/disks/utility_disk_list.go @@ -46,17 +46,41 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m c := m.(*controller.ControllerCfg) req := disks.ListRequest{} - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) } - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if account_name, ok := d.GetOk("account_name"); ok { + req.AccountName = account_name.(string) + } + if disk_max_size, ok := d.GetOk("disk_max_size"); ok { + req.DiskMaxSize = int64(disk_max_size.(int)) + } + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + if shared, ok := d.GetOk("shared"); ok { + req.Shared = shared.(bool) + } + if accountId, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(accountId.(int)) } if diskType, ok := d.GetOk("type"); ok { req.Type = strings.ToUpper(diskType.(string)) } - if accountId, ok := d.GetOk("accountId"); ok { - req.AccountID = uint64(accountId.(int)) + if pool, ok := d.GetOk("pool"); ok { + req.Pool = pool.(string) + } + if sepID, ok := d.GetOk("sep_id"); ok { + req.SEPID = uint64(sepID.(int)) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) } log.Debugf("utilityDiskListCheckPresence: load disk list") diff --git a/internal/service/cloudbroker/disks/utility_disk_list_deleted.go b/internal/service/cloudbroker/disks/utility_disk_list_deleted.go new file mode 100644 index 0000000..90acabb --- /dev/null +++ b/internal/service/cloudbroker/disks/utility_disk_list_deleted.go @@ -0,0 +1,85 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Nikita Sorokin, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListDisks, error) { + c := m.(*controller.ControllerCfg) + req := disks.ListDeletedRequest{} + + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if account_name, ok := d.GetOk("account_name"); ok { + req.AccountName = account_name.(string) + } + if disk_max_size, ok := d.GetOk("disk_max_size"); ok { + req.DiskMaxSize = int64(disk_max_size.(int)) + } + if shared, ok := d.GetOk("shared"); ok { + req.Shared = shared.(bool) + } + if account_id, ok 
:= d.GetOk("account_id"); ok { + req.AccountID = uint64(account_id.(int)) + } + if typev, ok := d.GetOk("type"); ok { + req.Type = typev.(string) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityDiskListDeletedCheckPresence: load disk list") + diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req) + if err != nil { + return nil, err + } + + return diskList, nil +} diff --git a/internal/service/cloudbroker/disks/utility_disk_list_types.go b/internal/service/cloudbroker/disks/utility_disk_list_types.go new file mode 100644 index 0000000..c4499cc --- /dev/null +++ b/internal/service/cloudbroker/disks/utility_disk_list_types.go @@ -0,0 +1,25 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) { + c := m.(*controller.ControllerCfg) + req := disks.ListTypesRequest{ + Detailed: false, + } + + log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed") + typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req) + if err != nil { + return nil, err + } + + return typesList, nil +} diff --git a/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go new file mode 100644 index 0000000..de259f1 --- /dev/null +++ b/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go @@ -0,0 +1,27 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) { + c := m.(*controller.ControllerCfg) + + log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") + listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{ + Detailed: true, + }) + + log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{})) + + if err != nil { + return nil, err + } + + return listTypesDetailed, nil +} diff --git a/internal/service/cloudbroker/disks/utility_disk_list_unattached.go b/internal/service/cloudbroker/disks/utility_disk_list_unattached.go new file mode 100644 index 0000000..60409e4 --- /dev/null +++ b/internal/service/cloudbroker/disks/utility_disk_list_unattached.go @@ -0,0 +1,54 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListUnattachedDisks, error) { + c := m.(*controller.ControllerCfg) + req := disks.ListUnattachedRequest{} + + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + if account_name, ok := d.GetOk("account_name"); ok { + req.AccountName = account_name.(string) + } + if disk_max_size, ok := d.GetOk("disk_max_size"); ok { + req.DiskMaxSize = int64(disk_max_size.(int)) + } + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + if diskType, ok := d.GetOk("type"); ok { + req.Type = diskType.(string) + } + if accountId, ok := d.GetOk("accountId"); ok { + 
req.AccountID = uint64(accountId.(int)) + } + if sepId, ok := d.GetOk("sep_id"); ok { + req.SEPID = uint64(sepId.(int)) + } + if pool, ok := d.GetOk("pool"); ok { + req.Pool = pool.(string) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list") + unattachedList, err := c.CloudBroker().Disks().ListUnattached(ctx, req) + if err != nil { + return nil, err + } + + return unattachedList, nil +} diff --git a/internal/service/cloudbroker/extnet/data_source_extnet.go b/internal/service/cloudbroker/extnet/data_source_extnet.go index 272dabe..d5d8c45 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet.go @@ -56,7 +56,7 @@ func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interfa func dataSourceExtnetSchemaMake() map[string]*schema.Schema { return map[string]*schema.Schema{ - "net_id": { + "extnet_id": { Type: schema.TypeInt, Required: true, }, @@ -116,10 +116,6 @@ func dataSourceExtnetSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, - "extnet_id": { - Type: schema.TypeInt, - Computed: true, - }, "ipcidr": { Type: schema.TypeString, Computed: true, @@ -198,6 +194,10 @@ func dataSourceExtnetSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, "domain_name": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_default.go b/internal/service/cloudbroker/extnet/data_source_extnet_default.go index 6d4fa7d..048b68c 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet_default.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet_default.go @@ -50,14 +50,14 @@ func dataSourceExtnetDefaultRead(ctx 
context.Context, d *schema.ResourceData, m id := uuid.New() d.SetId(id.String()) - d.Set("net_id", extnetId) + d.Set("extnet_id", extnetId) return nil } func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema { return map[string]*schema.Schema{ - "net_id": { + "extnet_id": { Type: schema.TypeInt, Computed: true, }, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go b/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go new file mode 100644 index 0000000..123ae18 --- /dev/null +++ b/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go @@ -0,0 +1,107 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + staticRoute, err := utilityDataStaticRouteCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(staticRoute.ID, 10)) + flattenStaticRouteData(d, staticRoute) + return nil +} + +func dataSourceStaticRouteSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "extnet_id": { + Type: schema.TypeInt, + Required: true, + Description: "Unique ID of the ExtNet", + }, + "route_id": { + Type: schema.TypeInt, + Required: true, + Description: "Unique ID of the static route", + }, + "compute_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "destination": { + Type: schema.TypeString, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeString, + Computed: true, + }, + } + return rets +} + +func DataSourceStaticRoute() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceStaticRouteRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceStaticRouteSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go b/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go new file mode 100644 index 0000000..34b21e1 --- /dev/null +++ 
b/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go @@ -0,0 +1,121 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + staticRouteList, err := utilityStaticRouteListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenStaticRouteList(staticRouteList)) + d.Set("entry_count", staticRouteList.EntryCount) + + return nil +} + +func dataSourceStaticRouteListSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "extnet_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of ExtNet", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "destination": { + Type: schema.TypeString, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeString, + Computed: true, + }, + "route_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func DataSourceStaticRouteList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceStaticRouteListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceStaticRouteListSchemaMake(), + } +} diff --git 
a/internal/service/cloudbroker/extnet/flattens.go b/internal/service/cloudbroker/extnet/flattens.go index 6017540..61a16f4 100644 --- a/internal/service/cloudbroker/extnet/flattens.go +++ b/internal/service/cloudbroker/extnet/flattens.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -46,6 +46,7 @@ func flattenListExtnet(extList *extnet.ListExtNet) []map[string]interface{} { "ckey": item.CKey, "meta": flattens.FlattenMeta(item.Meta), "default": item.Default, + "default_qos": flattenExtnetDefaultQos(item.DefaultQOS), "desc": item.Description, "free_ips": item.FreeIPs, "gid": item.GID, @@ -62,6 +63,7 @@ func flattenListExtnet(extList *extnet.ListExtNet) []map[string]interface{} { "status": item.Status, "vlan_id": item.VLANID, "check_ips": item.CheckIPs, + "vnfs": flattenExtnetVNFS(item.VNFs), } res = append(res, temp) @@ -73,7 +75,6 @@ func flattenListExtnet(extList *extnet.ListExtNet) []map[string]interface{} { func flattenRecordExtnet(d *schema.ResourceData, recNet *extnet.RecordExtNet) { d.Set("ckey", recNet.CKey) d.Set("meta", flattens.FlattenMeta(recNet.Meta)) - d.Set("default", recNet.Default) d.Set("desc", recNet.Description) d.Set("free_ips", recNet.FreeIPs) @@ -96,17 +97,49 @@ func flattenRecordExtnet(d *schema.ResourceData, recNet *extnet.RecordExtNet) { d.Set("gateway", recNet.Gateway) d.Set("network", recNet.Network) d.Set("prefix", recNet.Prefix) + d.Set("default_qos", flattenExtnetDefaultQos(recNet.DefaultQOS)) + d.Set("vnfs", flattenExtnetVNFS(recNet.VNFs)) + d.Set("reservations", flattenExtnetReservations(recNet.Reservations)) +} +func flattenRecordExtnetResource(d *schema.ResourceData, recNet *extnet.RecordExtNet, staticRouteList *extnet.ListStaticRoutes) { + d.Set("ckey", recNet.CKey) + d.Set("meta", flattens.FlattenMeta(recNet.Meta)) + d.Set("default", recNet.Default) + 
d.Set("desc", recNet.Description) + d.Set("free_ips", recNet.FreeIPs) + d.Set("gid", recNet.GID) + d.Set("guid", recNet.GUID) + d.Set("extnet_id", recNet.ID) + d.Set("ipcidr", recNet.IPCIDR) + d.Set("milestones", recNet.Milestones) + d.Set("name", recNet.Name) + d.Set("network_id", recNet.NetworkID) + d.Set("ovs_bridge", recNet.OVSBridge) + d.Set("pre_reservations_num", recNet.PreReservationsNum) + d.Set("pri_vnfdev_id", recNet.PriVNFDevID) + d.Set("shared_with", recNet.SharedWith) + d.Set("status", recNet.Status) + d.Set("vlan_id", recNet.VLANID) + d.Set("check_ips", recNet.CheckIPs) + d.Set("dns", recNet.DNS) + d.Set("excluded", flattenExtnetExcluded(recNet.Excluded)) + d.Set("gateway", recNet.Gateway) + d.Set("network", recNet.Network) + d.Set("prefix", recNet.Prefix) d.Set("default_qos", flattenExtnetDefaultQos(recNet.DefaultQOS)) d.Set("vnfs", flattenExtnetVNFS(recNet.VNFs)) d.Set("reservations", flattenExtnetReservations(recNet.Reservations)) + d.Set("routes", flattenStaticRouteList(staticRouteList)) } + func flattenExtnetExcluded(ers extnet.ListReservations) []map[string]interface{} { res := make([]map[string]interface{}, 0) for _, er := range ers { temp := map[string]interface{}{ "client_type": er.ClientType, + "desc": er.Description, "domain_name": er.DomainName, "hostname": er.Hostname, "ip": er.IP, @@ -159,3 +192,29 @@ func flattenExtnetDefaultQos(edqos extnet.QOS) []map[string]interface{} { res = append(res, temp) return res } + +func flattenStaticRouteList(sr *extnet.ListStaticRoutes) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(sr.Data)) + for _, staticRoute := range sr.Data { + temp := map[string]interface{}{ + "route_id": staticRoute.ID, + "destination": staticRoute.Destination, + "gateway": staticRoute.Gateway, + "guid": staticRoute.GUID, + "netmask": staticRoute.Netmask, + "compute_ids": staticRoute.ComputeIds, + } + res = append(res, temp) + } + + return res +} + +func flattenStaticRouteData(d *schema.ResourceData, 
route *extnet.ItemRoutes) { + d.Set("destination", route.Destination) + d.Set("gateway", route.Gateway) + d.Set("guid", route.GUID) + d.Set("netmask", route.Netmask) + d.Set("compute_ids", route.ComputeIds) + d.Set("route_id", route.ID) +} \ No newline at end of file diff --git a/internal/service/cloudbroker/extnet/resource_extnet.go b/internal/service/cloudbroker/extnet/resource_extnet.go index a4dd62e..b11cfef 100644 --- a/internal/service/cloudbroker/extnet/resource_extnet.go +++ b/internal/service/cloudbroker/extnet/resource_extnet.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -43,12 +43,17 @@ import ( "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" ) func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("cloudbroker: resourceExtnetCreate called with name '%s'", d.Get("name").(string)) c := m.(*controller.ControllerCfg) + if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil { + return diag.FromErr(err) + } + req := extnet.CreateRequest{ Name: d.Get("name").(string), GID: uint64(d.Get("gid").(int)), @@ -122,6 +127,7 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa } d.SetId(strconv.FormatUint(netID, 10)) + d.Set("extnet_id", netID) log.Debugf("cloudbroker: Extnet with id %d successfully created on platform", netID) if d.Get("excluded_ips").(*schema.Set).Len() > 0 { @@ -163,8 +169,12 @@ func resourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface d.SetId("") return diag.FromErr(err) } 
+ staticRouteList, err := utilityStaticRouteListInResourceCheckPresence(ctx, m, recNet.ID) + if err != nil { + return diag.FromErr(err) + } - flattenRecordExtnet(d, recNet) + flattenRecordExtnetResource(d, recNet, staticRouteList) return nil } @@ -219,6 +229,35 @@ func resourceExtnetUpdate(ctx context.Context, d *schema.ResourceData, m interfa } } + if d.HasChange("excluded_ips_range") { + if err := handleExcludedIPsRangeUpdate(ctx, d, c, recNet); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("shared_with") { + if err := handleSharedWithUpdate(ctx, d, c); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("virtual") { + if err := handleVirtualUpdate(ctx, d, c, recNet); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("restart") { + if err := handleRestartUpdate(ctx, d, c, recNet); err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("migrate") { + if err := handleMigrateUpdate(ctx, d, c, recNet); err != nil { + return diag.FromErr(err) + } + } + return resourceExtnetRead(ctx, d, m) } @@ -253,13 +292,13 @@ func resourceExtnetSchemaMake() map[string]*schema.Schema { "ipcidr": { Type: schema.TypeString, Required: true, - ForceNew: true, + // ForceNew: true, Description: "IP network CIDR", }, "vlan_id": { Type: schema.TypeInt, Required: true, - ForceNew: true, + // ForceNew: true, Description: "VLAN ID", }, "gateway": { @@ -352,6 +391,23 @@ func resourceExtnetSchemaMake() map[string]*schema.Schema { }, Description: "IPs to exclude in current extnet pool", }, + "excluded_ips_range": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_start": { + Type: schema.TypeString, + Required: true, + }, + "ip_end": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Description: "Range of IPs to exclude in current extnet pool", + }, "default_qos": { Type: schema.TypeList, MaxItems: 1, @@ -381,6 +437,15 @@ func resourceExtnetSchemaMake() 
map[string]*schema.Schema { }, }, }, + "restart":{ + Type: schema.TypeBool, + Optional: true, + Description: "restart extnet vnf device", + }, + "migrate":{ + Type: schema.TypeInt, + Optional: true, + }, "ckey": { Type: schema.TypeString, Computed: true, @@ -488,6 +553,41 @@ func resourceExtnetSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Computed: true, }, + "routes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "destination": { + Type: schema.TypeString, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "netmask": { + Type: schema.TypeString, + Computed: true, + }, + "route_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, "reservations": { Type: schema.TypeList, Computed: true, @@ -540,6 +640,10 @@ func ResourceExtnetCB() *schema.Resource { UpdateContext: resourceExtnetUpdate, DeleteContext: resourceExtnetDelete, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Timeouts: &schema.ResourceTimeout{ Read: &constants.Timeout300s, Create: &constants.Timeout300s, diff --git a/internal/service/cloudbroker/extnet/resource_extnet_static_route.go b/internal/service/cloudbroker/extnet/resource_extnet_static_route.go new file mode 100644 index 0000000..fd1218f --- /dev/null +++ b/internal/service/cloudbroker/extnet/resource_extnet_static_route.go @@ -0,0 +1,259 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + "fmt" + "strconv" + "strings" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + + if err := ic.ExistExtNet(ctx, uint64(d.Get("extnet_id").(int)), c); err != nil { + return diag.FromErr(err) + } + + req := extnet.StaticRouteAddRequest{ + ExtNetId: uint64(d.Get("extnet_id").(int)), + Destination: d.Get("destination").(string), + Netmask: d.Get("netmask").(string), + Gateway: d.Get("gateway").(string), + } + + if computesIDS, ok := d.GetOk("compute_ids"); ok { + ids := computesIDS.([]interface{}) + + res := 
make([]uint64, 0, len (ids)) + + for _, id := range ids { + computeId := uint64(id.(int)) + res = append(res, computeId) + } + + req.ComputeIds = res + } + + _, err := c.CloudBroker().ExtNet().StaticRouteAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + staticRouteData, err := getStaticRouteData(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprintf("%d#%d", req.ExtNetId, staticRouteData.ID)) + + return resourceStaticRouteRead(ctx, d, m) +} + +func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + warnings := dc.Warnings{} + + staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenStaticRouteData(d, staticRouteData) + + return warnings.Get() +} + +func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + warnings := dc.Warnings{} + + if err := ic.ExistExtNet(ctx, uint64(d.Get("extnet_id").(int)), c); err != nil { + return diag.FromErr(err) + } + + staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + if d.HasChange("compute_ids") { + deletedIds := make([]uint64, 0) + addedIds := make([]uint64, 0) + + oldComputeIds, newComputeIds := d.GetChange("compute_ids") + oldComputeIdsSlice := oldComputeIds.([]interface{}) + newComputeIdsSlice := newComputeIds.([]interface{}) + + for _, el := range oldComputeIdsSlice { + if !isContainsIds(newComputeIdsSlice, el) { + convertedEl := uint64(el.(int)) + deletedIds = append(deletedIds, convertedEl) + } + } + + for _, el := range newComputeIdsSlice { + if !isContainsIds(oldComputeIdsSlice, el) { + convertedEl := uint64(el.(int)) + addedIds = append(addedIds, convertedEl) + } + } + + if len(deletedIds) > 0 { + req := extnet.StaticRouteAccessRevokeRequest{ + ExtNetID: 
uint64(d.Get("extnet_id").(int)), + RouteId: staticRouteData.ID, + ComputeIds: deletedIds, + } + + _, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req) + if err != nil { + warnings.Add(err) + } + } + + if len(addedIds) > 0 { + req := extnet.StaticRouteAccessGrantRequest{ + ExtNetID: uint64(d.Get("extnet_id").(int)), + RouteId: staticRouteData.ID, + ComputeIds: addedIds, + } + + _, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + + return append(warnings.Get(), resourceStaticRouteRead(ctx, d, m)...) +} + +func resourceStaticRouteDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + req := extnet.StaticRouteDelRequest{} + + arr := strings.Split(d.Id(), "#") + if len(arr) != 2 { + return diag.Errorf("broken state id - %s", d.Id()) + } + + req.ExtNetID, _ = strconv.ParseUint(arr[0], 10, 64) + req.RouteId, _ = strconv.ParseUint(arr[1], 10, 64) + + _, err := c.CloudBroker().ExtNet().StaticRouteDel(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceStaticRouteSchemaMake() map[string]*schema.Schema { + rets := dataSourceStaticRouteSchemaMake() + rets["route_id"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + } + rets["compute_ids"] = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + } + rets["destination"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + rets["gateway"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + rets["netmask"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + + return rets +} + +func isContainsIds(els []interface{}, el interface{}) bool { + convEl := el.(int) + for _, elOld := range els { + if convEl == elOld.(int) { + return true + } + } + return false +} + +func 
ResourceStaticRoute() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceStaticRouteCreate, + ReadContext: resourceStaticRouteRead, + UpdateContext: resourceStaticRouteUpdate, + DeleteContext: resourceStaticRouteDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout20m, + Read: &constants.Timeout600s, + Update: &constants.Timeout20m, + Delete: &constants.Timeout600s, + Default: &constants.Timeout600s, + }, + + Schema: resourceStaticRouteSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/extnet/utility_extnet.go b/internal/service/cloudbroker/extnet/utility_extnet.go index 383ba93..3eb6a91 100644 --- a/internal/service/cloudbroker/extnet/utility_extnet.go +++ b/internal/service/cloudbroker/extnet/utility_extnet.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -38,245 +38,20 @@ import ( "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" ) -func handleExcludedIPsUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - old_set, new_set := d.GetChange("excluded_ips") - - detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set)) - if detach_set.Len() > 0 { - ips := make([]string, 0) - for _, detach_ip := range detach_set.List() { - ips = append(ips, detach_ip.(string)) - } - - log.Debugf("cloudbroker: removing %d IP address(es) from excluded list", detach_set.Len()) - req := extnet.IPsIncludeRequest{ - NetID: recNet.ID, - IPs: ips, - } - - _, err := c.CloudBroker().ExtNet().IPsInclude(ctx, req) - if err != nil { - return err - } - } - - attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set)) - if attach_set.Len() > 0 { - ips := make([]string, 0) - for _, attach_ip := range attach_set.List() { - ips = append(ips, attach_ip.(string)) - } - - log.Debugf("cloudbroker: excluding %d IP address(es) from extnet with id %d", attach_set.Len(), recNet.ID) - req := extnet.IPsExcludeRequest{ - NetID: recNet.ID, - IPs: ips, - } - - _, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req) - if err != nil { - return err - } - } - - return nil -} - -func handleSetDefault(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - set_default := d.Get("set_default").(bool) - if set_default && recNet.Default == false { - req := extnet.SetDefaultRequest{ - NetID: recNet.ID, - } - - _, err := c.CloudBroker().ExtNet().SetDefault(ctx, req) - if err != nil { - return err - } - } - - return 
nil -} - -func handleBasicUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - doBasicUpdate := false - basiUpdateReq := extnet.UpdateRequest{NetID: recNet.ID} - - if d.HasChange("name") { - basiUpdateReq.Name = d.Get("name").(string) - doBasicUpdate = true - } - if d.HasChange("desc") { - basiUpdateReq.Description = d.Get("desc").(string) - doBasicUpdate = true - } - - if doBasicUpdate { - _, err := c.CloudBroker().ExtNet().Update(ctx, basiUpdateReq) - if err != nil { - return err - } - } - - return nil -} - -func handleEnableUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - enable := d.Get("enable").(bool) - if enable { - if recNet.Status == status.Disabled { - req := extnet.EnableRequest{NetID: recNet.ID} - _, err := c.CloudBroker().ExtNet().Enable(ctx, req) - if err != nil { - return err - } - } - } else { - if recNet.Status == status.Enabled { - req := extnet.DisableRequest{NetID: recNet.ID} - _, err := c.CloudBroker().ExtNet().Disable(ctx, req) - if err != nil { - return err - } - } - } - return nil -} - -func handleDefaultQOSUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - qos := d.Get("default_qos").([]interface{})[0].(map[string]interface{}) - - req := extnet.DefaultQOSUpdateRequest{ - NetID: recNet.ID, - IngressRate: uint64(qos["in_rate"].(int)), - IngressBurst: uint64(qos["in_burst"].(int)), - EgressRate: uint64(qos["e_rate"].(int)), - } - - _, err := c.CloudBroker().ExtNet().DefaultQOSUpdate(ctx, req) - if err != nil { - return err - } - - return nil -} - -func handleNTPUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - changed_list := d.Get("ntp").([]interface{}) - - ntp_list := make([]string, 0) - for _, ntp_address := range changed_list { - ntp_list = append(ntp_list, 
ntp_address.(string)) - } - - req := extnet.NTPApplyRequest{ - NetID: recNet.ID, - NTPList: ntp_list, - } - - _, err := c.CloudBroker().ExtNet().NTPApply(ctx, req) - if err != nil { - return err - } - - return nil -} - -func handleDNSUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { - changed_list := d.Get("dns").([]interface{}) - - dns_list := make([]string, 0) - for _, dns_address := range changed_list { - dns_list = append(dns_list, dns_address.(string)) - } - - req := extnet.DNSApplyRequest{ - NetID: recNet.ID, - DNSList: dns_list, - } - - _, err := c.CloudBroker().ExtNet().DNSApply(ctx, req) - if err != nil { - return err - } - - return nil -} - -func utilityExtnetDefaultCheckPresence(ctx context.Context, m interface{}) (uint64, error) { - c := m.(*controller.ControllerCfg) - - return c.CloudBroker().ExtNet().GetDefault(ctx) -} - -func utilityExtnetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.ListExtNet, error) { - c := m.(*controller.ControllerCfg) - - req := extnet.ListRequest{} - - if accountId, ok := d.GetOk("account_id"); ok { - req.AccountID = uint64(accountId.(int)) - } - - if by_id, ok := d.GetOk("by_id"); ok { - req.ByID = uint64(by_id.(int)) - } - - if name, ok := d.GetOk("name"); ok { - req.Name = name.(string) - } - - if network, ok := d.GetOk("network"); ok { - req.Network = network.(string) - } - - if vlan_id, ok := d.GetOk("vlan_id"); ok { - req.VLANID = uint64(vlan_id.(int)) - } - - if vnfdev_id, ok := d.GetOk("vnfdev_id"); ok { - req.VNFDevID = uint64(vnfdev_id.(int)) - } - - if status, ok := d.GetOk("status"); ok { - req.Status = status.(string) - } - - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) - } - - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) - } - - res, err := c.CloudBroker().ExtNet().List(ctx, req) - if err != nil { - return nil, err - } - - return res, nil -} - func 
utilityExtnetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.RecordExtNet, error) { c := m.(*controller.ControllerCfg) - var netId uint64 - if id, ok := d.GetOk("net_id"); ok { - netId = uint64(id.(int)) - } else { - parsed, _ := strconv.ParseUint(d.Id(), 10, 64) - netId = parsed - } + req := extnet.GetRequest{} - req := extnet.GetRequest{ - NetID: netId, + if d.Id() != "" { + netId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.NetID = netId + } else { + req.NetID = uint64(d.Get("extnet_id").(int)) } res, err := c.CloudBroker().ExtNet().Get(ctx, req) diff --git a/internal/service/cloudbroker/account/account_disk_ds_subresource.go b/internal/service/cloudbroker/extnet/utility_extnet_default.go similarity index 60% rename from internal/service/cloudbroker/account/account_disk_ds_subresource.go rename to internal/service/cloudbroker/extnet/utility_extnet_default.go index c7d2f19..5665555 100644 --- a/internal/service/cloudbroker/account/account_disk_ds_subresource.go +++ b/internal/service/cloudbroker/extnet/utility_extnet_default.go @@ -1,63 +1,54 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. 
- -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountDiskSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_name": { - Type: schema.TypeString, - Computed: true, - }, - "pool_name": { - Type: schema.TypeString, - Computed: true, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - } -} +/* +<<<<<<<< HEAD:internal/dc/utils.go +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +======== +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +>>>>>>>> dev:internal/service/cloudbroker/extnet/utility_extnet_default.go +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +<<<<<<<< HEAD:internal/dc/utils.go +Nikita Sorokin, +======== +Tim Tkachev, +>>>>>>>> dev:internal/service/cloudbroker/extnet/utility_extnet_default.go + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityExtnetDefaultCheckPresence(ctx context.Context, m interface{}) (uint64, error) { + c := m.(*controller.ControllerCfg) + + return c.CloudBroker().ExtNet().GetDefault(ctx) +} diff --git a/internal/service/cloudbroker/extnet/utility_extnet_list.go b/internal/service/cloudbroker/extnet/utility_extnet_list.go new file mode 100644 index 0000000..f4305e3 --- /dev/null +++ b/internal/service/cloudbroker/extnet/utility_extnet_list.go @@ -0,0 +1,91 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. 
+ +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityExtnetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.ListExtNet, error) { + c := m.(*controller.ControllerCfg) + + req := extnet.ListRequest{} + + if accountId, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(accountId.(int)) + } + + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if network, ok := d.GetOk("network"); ok { + req.Network = network.(string) + } + + if vlan_id, ok := d.GetOk("vlan_id"); ok { + req.VLANID = uint64(vlan_id.(int)) + } + + if vnfdev_id, ok := d.GetOk("vnfdev_id"); ok { + req.VNFDevID = uint64(vnfdev_id.(int)) + } + + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + res, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/internal/service/cloudbroker/extnet/utility_extnet_resource.go b/internal/service/cloudbroker/extnet/utility_extnet_resource.go new file mode 100644 index 0000000..80e15c5 --- /dev/null +++ b/internal/service/cloudbroker/extnet/utility_extnet_resource.go @@ -0,0 +1,360 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func handleExcludedIPsUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + old_set, new_set := d.GetChange("excluded_ips") + + detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set)) + if detach_set.Len() > 0 { + ips := make([]string, 0) + for _, detach_ip := range detach_set.List() { + ips = append(ips, detach_ip.(string)) + } + + log.Debugf("cloudbroker: removing %d IP address(es) from excluded list", 
detach_set.Len()) + req := extnet.IPsIncludeRequest{ + NetID: recNet.ID, + IPs: ips, + } + + _, err := c.CloudBroker().ExtNet().IPsInclude(ctx, req) + if err != nil { + return err + } + } + + attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set)) + if attach_set.Len() > 0 { + ips := make([]string, 0) + for _, attach_ip := range attach_set.List() { + ips = append(ips, attach_ip.(string)) + } + + log.Debugf("cloudbroker: excluding %d IP address(es) from extnet with id %d", attach_set.Len(), recNet.ID) + req := extnet.IPsExcludeRequest{ + NetID: recNet.ID, + IPs: ips, + } + + _, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func handleSetDefault(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + set_default := d.Get("set_default").(bool) + if set_default && !recNet.Default { + req := extnet.SetDefaultRequest{ + NetID: recNet.ID, + } + + _, err := c.CloudBroker().ExtNet().SetDefault(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func handleBasicUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + doBasicUpdate := false + basiUpdateReq := extnet.UpdateRequest{NetID: recNet.ID} + + if d.HasChange("name") { + basiUpdateReq.Name = d.Get("name").(string) + doBasicUpdate = true + } + if d.HasChange("desc") { + basiUpdateReq.Description = d.Get("desc").(string) + doBasicUpdate = true + } + + if doBasicUpdate { + _, err := c.CloudBroker().ExtNet().Update(ctx, basiUpdateReq) + if err != nil { + return err + } + } + + return nil +} + +func handleEnableUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + enable := d.Get("enable").(bool) + if enable { + if recNet.Status == status.Disabled { + req := extnet.EnableRequest{NetID: recNet.ID} + _, err := c.CloudBroker().ExtNet().Enable(ctx, 
req) + if err != nil { + return err + } + } + } else { + if recNet.Status == status.Enabled { + req := extnet.DisableRequest{NetID: recNet.ID} + _, err := c.CloudBroker().ExtNet().Disable(ctx, req) + if err != nil { + return err + } + } + } + + return nil +} + +func handleDefaultQOSUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + qos := d.Get("default_qos").([]interface{})[0].(map[string]interface{}) + + req := extnet.DefaultQOSUpdateRequest{ + NetID: recNet.ID, + IngressRate: uint64(qos["in_rate"].(int)), + IngressBurst: uint64(qos["in_burst"].(int)), + EgressRate: uint64(qos["e_rate"].(int)), + } + + _, err := c.CloudBroker().ExtNet().DefaultQOSUpdate(ctx, req) + if err != nil { + return err + } + + return nil +} + +func handleNTPUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + changed_list := d.Get("ntp").([]interface{}) + + ntp_list := make([]string, 0) + for _, ntp_address := range changed_list { + ntp_list = append(ntp_list, ntp_address.(string)) + } + + req := extnet.NTPApplyRequest{ + NetID: recNet.ID, + NTPList: ntp_list, + } + + _, err := c.CloudBroker().ExtNet().NTPApply(ctx, req) + if err != nil { + return err + } + + return nil +} + +func handleDNSUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + changed_list := d.Get("dns").([]interface{}) + + dns_list := make([]string, 0) + for _, dns_address := range changed_list { + dns_list = append(dns_list, dns_address.(string)) + } + + req := extnet.DNSApplyRequest{ + NetID: recNet.ID, + DNSList: dns_list, + } + + _, err := c.CloudBroker().ExtNet().DNSApply(ctx, req) + if err != nil { + return err + } + + return nil +} + +func handleExcludedIPsRangeUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + old_set, new_set := 
d.GetChange("excluded_ips_range") + + detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set)) + if detach_set.Len() > 0 { + for _, detach_ip_range := range detach_set.List() { + + log.Debugf("cloudbroker: removing range of IP addreses from excluded range list") + + req := extnet.IPsIncludeRangeRequest{ + NetID: recNet.ID, + IPStart: detach_ip_range.(map[string]interface{})["ip_start"].(string), + IPEnd: detach_ip_range.(map[string]interface{})["ip_end"].(string), + } + + _, err := c.CloudBroker().ExtNet().IPsIncludeRange(ctx, req) + if err != nil { + return err + } + } + } + attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set)) + if attach_set.Len() > 0 { + for _, attach_ip_range := range attach_set.List() { + + log.Debugf("cloudbroker: excluding range of IP addreses from excluded range list") + + req := extnet.IPsExcludeRangeRequest{ + NetID: recNet.ID, + IPStart: attach_ip_range.(map[string]interface{})["ip_start"].(string), + IPEnd: attach_ip_range.(map[string]interface{})["ip_end"].(string), + } + + _, err := c.CloudBroker().ExtNet().IPsExcludeRange(ctx, req) + if err != nil { + return err + } + } + } + return nil +} + +func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) error { + deletedIds := make([]uint64, 0) + addedIds := make([]uint64, 0) + + oldAccountIds, newAccountIds := d.GetChange("shared_with") + oldAccountIdsSlice := oldAccountIds.([]interface{}) + newAccountIdsSlice := newAccountIds.([]interface{}) + + for _, el := range oldAccountIdsSlice { + if !isContainsIds(newAccountIdsSlice, el) { + convertedEl := uint64(el.(int)) + deletedIds = append(deletedIds, convertedEl) + } + } + + for _, el := range newAccountIdsSlice { + if !isContainsIds(oldAccountIdsSlice, el) { + convertedEl := uint64(el.(int)) + addedIds = append(addedIds, convertedEl) + } + } + + if len(deletedIds) > 0 { + for _, accountId := range deletedIds { + req := extnet.AccessRemoveRequest{ + NetID: 
uint64(d.Get("extnet_id").(int)), + AccountID: accountId, + } + + _, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req) + if err != nil { + return err + } + } + } + + if len(addedIds) > 0 { + for _, accountId := range addedIds { + req := extnet.AccessAddRequest{ + NetID: uint64(d.Get("extnet_id").(int)), + AccountID: accountId, + } + + _, err := c.CloudBroker().ExtNet().AccessAdd(ctx, req) + if err != nil { + return err + } + } + + } + + return nil +} + +func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + virtualOld, virtualNew := d.GetChange("virtual") + + if virtualOld == false && virtualNew == true { + req := extnet.DeviceRemoveRequest{NetID: recNet.ID} + _, err := c.CloudBroker().ExtNet().DeviceRemove(ctx, req) + if err != nil { + return err + } + + } else if virtualOld == true && virtualNew == false { + req := extnet.DeviceDeployRequest{NetID: recNet.ID} + _, err := c.CloudBroker().ExtNet().DeviceDeploy(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + restartOld, restartNew := d.GetChange("restart") + + if restartOld == false && restartNew == true { + req := extnet.DeviceRestartRequest{NetID: recNet.ID} + _, err := c.CloudBroker().ExtNet().DeviceRestart(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func handleMigrateUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { + stackId := uint64(d.Get("migrate").(int)) + + if err := ic.ExistStack(ctx, stackId, c); err != nil { + return err + } + + req := extnet.DeviceMigrateRequest{ + NetID: recNet.ID, + StackID: stackId, + } + + _, err := c.CloudBroker().ExtNet().DeviceMigrate(ctx, req) + if err != nil { + return err + } + + return nil +} diff --git 
a/internal/service/cloudbroker/extnet/utility_extnet_static_route.go b/internal/service/cloudbroker/extnet/utility_extnet_static_route.go new file mode 100644 index 0000000..47a0308 --- /dev/null +++ b/internal/service/cloudbroker/extnet/utility_extnet_static_route.go @@ -0,0 +1,108 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + "fmt" + "strconv" + "strings" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityDataStaticRouteCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.ItemRoutes, error) { + c := m.(*controller.ControllerCfg) + req := extnet.StaticRouteListRequest{} + var routeId uint64 + + if d.Id() != "" { + arr := strings.Split(d.Id(), "#") + if len(arr) != 2 { + return nil, fmt.Errorf("broken state id") + } + + req.ExtNetID, _ = strconv.ParseUint(arr[0], 10, 64) + routeId, _ = strconv.ParseUint(arr[1], 10, 64) + } else { + req.ExtNetID = uint64(d.Get("extnet_id").(int)) + routeId = uint64(d.Get("route_id").(int)) + } + + log.Debugf("utilityStaticRouteCheckPresence, extnet id: %v", req.ExtNetID) + staticRouteList, err := c.CloudBroker().ExtNet().StaticRouteList(ctx, req) + if err != nil { + return nil, err + } + + log.Debugf("utilityStaticRouteCheckPresence: ROUTE ID %v", routeId) + + staticRoute := extnet.ItemRoutes{} + for _, route := range staticRouteList.Data { + if routeId == route.ID { + staticRoute = route + return &staticRoute, nil + } + } + + return nil, fmt.Errorf("static route not found") +} + +func getStaticRouteData(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.ItemRoutes, error) { + c := m.(*controller.ControllerCfg) + req := extnet.StaticRouteListRequest{} + req.ExtNetID = uint64(d.Get("extnet_id").(int)) + + staticRouteList, err := c.CloudBroker().ExtNet().StaticRouteList(ctx, req) + if err != nil { + return nil, err + } + + destination := d.Get("destination").(string) + gateway := d.Get("gateway").(string) + + staticRoute := extnet.ItemRoutes{} + for _, 
route := range staticRouteList.Data { + if destination == route.Destination && gateway == route.Gateway { + staticRoute = route + return &staticRoute, nil + } + } + + return nil, fmt.Errorf("static route not found") +} diff --git a/internal/service/cloudbroker/extnet/utility_extnet_static_route_list.go b/internal/service/cloudbroker/extnet/utility_extnet_static_route_list.go new file mode 100644 index 0000000..c708e09 --- /dev/null +++ b/internal/service/cloudbroker/extnet/utility_extnet_static_route_list.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package extnet + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityStaticRouteListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*extnet.ListStaticRoutes, error) { + c := m.(*controller.ControllerCfg) + req := extnet.StaticRouteListRequest{} + + req.ExtNetID = uint64(d.Get("extnet_id").(int)) + + log.Debugf("utilityStaticRouteListCheckPresence") + staticRouteList, err := c.CloudBroker().ExtNet().StaticRouteList(ctx, req) + if err != nil { + return nil, err + } + + return staticRouteList, nil +} + +func utilityStaticRouteListInResourceCheckPresence(ctx context.Context, m interface{}, extnetId uint64) (*extnet.ListStaticRoutes, error) { + c := m.(*controller.ControllerCfg) + req := extnet.StaticRouteListRequest{ + ExtNetID: extnetId, + } + + log.Debugf("utilityStaticRouteListInResourceCheckPresence") + staticRouteList, err := c.CloudBroker().ExtNet().StaticRouteList(ctx, req) + if err != nil { + return nil, err + } + + return staticRouteList, nil +} \ No newline at end of file diff --git a/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go b/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go new file mode 100644 index 0000000..51db9d8 --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go @@ -0,0 +1,219 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + flipgroup, err := utilityFlipgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenFlipgroup(d, flipgroup) + d.SetId(strconv.Itoa(d.Get("flipgroup_id").(int))) + + return nil +} + +func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "flipgroup_id": { + Type: schema.TypeInt, + Required: true, + Description: "flipgroup_id", + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "account_id", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "account_name", + }, + "client_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Description: "client_ids", + }, + "client_names": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "client_names", + }, + "client_type": { + Type: schema.TypeString, + Computed: true, + Description: "client_type", + }, + "conn_id": { + Type: schema.TypeInt, + Computed: true, + Description: "conn_id", + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + Description: "conn_type", + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + Description: "created_by", + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + Description: "created_time", + }, + "default_gw": { + Type: schema.TypeString, + Computed: true, + Description: "default_gw", + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + Description: "deleted_by", + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + Description: "deleted_time", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + Description: "gid", + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + Description: "guid", + }, + "ip": { + Type: schema.TypeString, + Computed: true, + Description: "ip", + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + Description: "milestones", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "name", + }, + "net_id": { + Type: schema.TypeInt, + Computed: true, + Description: "net_id", + }, + "net_type": { + Type: schema.TypeString, + Computed: true, + Description: "net_type", + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: "network", + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + Description: "rg_id", + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + Description: "rg_name", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "status", + }, + "updated_by": { + Type: schema.TypeString, + 
Computed: true, + Description: "updated_by", + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + Description: "updated_time", + }, + } + return rets +} + +func DataSourceFlipgroup() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceFlipgroupRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceFlipgroupSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go b/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go new file mode 100644 index 0000000..88693bf --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go @@ -0,0 +1,243 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceFlipgroupListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + flipgroupsList, err := utilityFlipgroupListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenFlipgroupsList(flipgroupsList)) + d.Set("entry_count", flipgroupsList.EntryCount) + + return nil +} + +func dataSourceFlipgroupItemSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "ckey": { + Type: schema.TypeString, + Computed: true, + Description: "ckey", + }, + "meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "account_id", + }, + "client_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Description: "client_ids", + }, + "client_type": { + Type: schema.TypeString, + Computed: true, + Description: "client_type", + }, + "conn_id": { + Type: schema.TypeInt, + Computed: true, + Description: "conn_id", + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + Description: "conn_type", + }, + "default_gw": { + Type: schema.TypeString, + Computed: true, + Description: "default_gw", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + Description: "gid", + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + Description: "guid", + }, + "flipgroup_id": { + Type: 
schema.TypeInt, + Computed: true, + Description: "flipgroup_id", + }, + "ip": { + Type: schema.TypeString, + Computed: true, + Description: "ip", + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + Description: "milestones", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "name", + }, + "net_id": { + Type: schema.TypeInt, + Computed: true, + Description: "net_id", + }, + "net_type": { + Type: schema.TypeString, + Computed: true, + Description: "net_type", + }, + "net_mask": { + Type: schema.TypeInt, + Computed: true, + Description: "net_mask", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "status", + }, + } + return rets +} + +func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: "name", + }, + "vins_id": { + Type: schema.TypeInt, + Optional: true, + Description: "vins_id", + }, + "vins_name": { + Type: schema.TypeString, + Optional: true, + Description: "vins_name", + }, + "extnet_id": { + Type: schema.TypeInt, + Optional: true, + Description: "extnet_id", + }, + "by_ip": { + Type: schema.TypeString, + Optional: true, + Description: "by_ip", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "rg_id", + }, + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "by_id", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: dataSourceFlipgroupItemSchemaMake(), + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry_count", + }, + } + return res +} + +func DataSourceFlipgroupList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: 
dataSourceFlipgroupListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceFlipgroupsListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/flipgroup/flattens.go b/internal/service/cloudbroker/flipgroup/flattens.go new file mode 100644 index 0000000..4c35c5c --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/flattens.go @@ -0,0 +1,100 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" +) + +func flattenFlipgroup(d *schema.ResourceData, flip *flipgroup.RecordFLIPGroup) { + d.Set("flipgroup_id", flip.ID) + d.Set("account_id", flip.AccountID) + d.Set("account_name", flip.AccountName) + d.Set("client_ids", flip.ClientIDs) + d.Set("client_names", flip.ClientNames) + d.Set("client_type", flip.ClientType) + d.Set("conn_id", flip.ConnID) + d.Set("conn_type", flip.ConnType) + d.Set("created_by", flip.CreatedBy) + d.Set("created_time", flip.CreatedTime) + d.Set("default_gw", flip.DefaultGW) + d.Set("deleted_by", flip.DeletedBy) + d.Set("deleted_time", flip.DeletedTime) + d.Set("description", flip.Description) + d.Set("gid", flip.GID) + d.Set("guid", flip.GUID) + d.Set("ip", flip.IP) + d.Set("milestones", flip.Milestones) + d.Set("name", flip.Name) + d.Set("net_id", flip.NetID) + d.Set("net_type", flip.NetType) + d.Set("network", flip.Network) + d.Set("rg_id", flip.RGID) + d.Set("rg_name", flip.RGName) + d.Set("status", flip.Status) + d.Set("updated_by", flip.UpdatedBy) + d.Set("updated_time", flip.UpdatedTime) +} + +func flattenFlipgroupsList(fg *flipgroup.ListFLIPGroups) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(fg.Data)) + for _, flip := range fg.Data { + temp := map[string]interface{}{ + "ckey": flip.CKey, + "meta": flattens.FlattenMeta(flip.Meta), + "flipgroup_id": flip.ID, + "account_id": flip.AccountID, + "client_ids": flip.ClientIDs, + "client_type": flip.ClientType, + "conn_id": flip.ConnID, + "conn_type": flip.ConnType, + "default_gw": flip.DefaultGW, + "description": flip.Description, + "gid": flip.GID, + "guid": flip.GUID, + "ip": flip.IP, + "milestones": flip.Milestones, + 
"name": flip.Name, + "net_id": flip.NetID, + "net_type": flip.NetType, + "net_mask": flip.NetMask, + "status": flip.Status, + } + res = append(res, temp) + } + return res +} diff --git a/internal/service/cloudbroker/flipgroup/resource_check_input_values.go b/internal/service/cloudbroker/flipgroup/resource_check_input_values.go new file mode 100644 index 0000000..a17bb5c --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/resource_check_input_values.go @@ -0,0 +1,70 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + accountId := uint64(d.Get("account_id").(int)) + req := account.ListRequest{} + + accountList, err := c.CloudBroker().Account().List(ctx, req) + if err != nil { + return false, err + } + + return len(accountList.FilterByID(accountId).Data) != 0, nil +} + +func existNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + netID := uint64(d.Get("net_id").(int)) + req := vins.ListRequest{} + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return false, err + } + + return len(vinsList.FilterByID(netID).Data) != 0, nil +} diff --git a/internal/service/cloudbroker/flipgroup/resource_flipgroup.go b/internal/service/cloudbroker/flipgroup/resource_flipgroup.go new file mode 100644 index 0000000..203912c --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/resource_flipgroup.go @@ -0,0 +1,364 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + + log "github.com/sirupsen/logrus" +) + +func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int)) + + c := m.(*controller.ControllerCfg) + req := flipgroup.CreateRequest{ + Name: d.Get("name").(string), + NetType: d.Get("net_type").(string), + ClientType: d.Get("client_type").(string), + } + + haveAccount, err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveAccount { + return diag.Errorf("resourceFlipgroupCreate: 
can't create Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) + } + req.AccountID = uint64(d.Get("account_id").(int)) + + haveVINS, err := existNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveVINS { + return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int)) + } + req.NetID = uint64(d.Get("net_id").(int)) + + if IP, ok := d.GetOk("ip"); ok { + req.IP = IP.(string) + } + if description, ok := d.GetOk("desc"); ok { + req.Description = description.(string) + } + + resp, err := c.CloudBroker().FLIPGroup().Create(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprint(resp.ID)) + + var warnings dc.Warnings + + if client_ids, ok := d.GetOk("client_ids"); ok { + casted := client_ids.([]interface{}) + addComputesAfterCreation(ctx, &warnings, c, casted, resp.ID) + } + + return append(warnings.Get(), resourceFlipgroupRead(ctx, d, m)...) 
+} + +func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + fg, err := utilityFlipgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenFlipgroup(d, fg) + + return nil +} + +func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int)) + c := m.(*controller.ControllerCfg) + + haveAccount, err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveAccount { + return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) + } + haveVINS, err := existNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveVINS { + return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int)) + } + + fg, err := utilityFlipgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + var warnings dc.Warnings + basicUpdate := false + req := flipgroup.EditRequest{FLIPGroupID: fg.ID} + + if d.HasChange("desc") { + req.Description = d.Get("desc").(string) + basicUpdate = true + } + + if d.HasChange("name") { + req.Name = d.Get("name").(string) + basicUpdate = true + } + + if basicUpdate { + _, err := c.CloudBroker().FLIPGroup().Edit(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("client_ids") { + handleClientIDsUpdate(ctx, d, c, fg, &warnings) + } + + return append(warnings.Get(), resourceFlipgroupRead(ctx, d, m)...) 
+} + +func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceFlipgroupDelete called with id: %v", d.Get("flipgroup_id").(int)) + c := m.(*controller.ControllerCfg) + + fg, err := utilityFlipgroupCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + req := flipgroup.DeleteRequest{ + FLIPGroupID: fg.ID, + } + + _, err = c.CloudBroker().FLIPGroup().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +func resourceFlipgroupSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Required: true, + Description: "Account ID", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Flipgroup name", + }, + "net_id": { + Type: schema.TypeInt, + Required: true, + Description: "EXTNET or ViNS ID", + }, + "net_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, true), + Description: "Network type, EXTNET or VINS", + }, + "client_type": { + Type: schema.TypeString, + Required: true, + Description: "Type of client, 'compute' ('vins' will be later)", + ValidateFunc: validation.StringInSlice([]string{"compute"}, true), + }, + "ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "IP address to associate with this group. 
If empty, the platform will autoselect IP address", + }, + "desc": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Text description of this Flipgroup instance", + }, + "client_ids": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Description: "List of clients attached to this Flipgroup instance", + }, + "client_names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "client_names", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "account_name", + }, + "flipgroup_id": { + Type: schema.TypeInt, + Computed: true, + }, + "conn_id": { + Type: schema.TypeInt, + Computed: true, + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + Description: "created_by", + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + Description: "created_time", + }, + "default_gw": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + Description: "deleted_by", + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + Description: "deleted_time", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: "network", + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + Description: "rg_id", + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + Description: "rg_name", + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + Description: "updated_by", + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + 
Description: "updated_time", + }, + "net_mask": { + Type: schema.TypeInt, + Computed: true, + }, + "ckey": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func ResourceFlipgroup() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + CreateContext: resourceFlipgroupCreate, + ReadContext: resourceFlipgroupRead, + UpdateContext: resourceFlipgroupUpdate, + DeleteContext: resourceFlipgroupDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout300s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: resourceFlipgroupSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/flipgroup/utility_flipgroup.go b/internal/service/cloudbroker/flipgroup/utility_flipgroup.go new file mode 100644 index 0000000..0ffa4fc --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/utility_flipgroup.go @@ -0,0 +1,66 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. 
+ +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.RecordFLIPGroup, error) { + c := m.(*controller.ControllerCfg) + req := flipgroup.GetRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.FLIPGroupID = id + } else { + req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int)) + } + + log.Debugf("utilityFlipgroupCheckPresence: load flipgroup") + flipgroup, err := c.CloudBroker().FLIPGroup().Get(ctx, req) + if err != nil { + return nil, err + } + + return flipgroup, nil +} diff --git a/internal/service/cloudbroker/flipgroup/utility_flipgroup_list.go b/internal/service/cloudbroker/flipgroup/utility_flipgroup_list.go new file mode 100644 index 0000000..24c8dcc --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/utility_flipgroup_list.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.ListFLIPGroups, error) { + c := m.(*controller.ControllerCfg) + req := flipgroup.ListRequest{} + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if vinsId, ok := d.GetOk("vins_id"); ok { + req.VINSID = uint64(vinsId.(int)) + } + if vinsName, ok := d.GetOk("vins_name"); ok { + req.VINSName = vinsName.(string) + } + if extnetId, ok := d.GetOk("extnet_id"); ok { + req.ExtNetID = uint64(extnetId.(int)) + } + if byIp, ok := d.GetOk("by_ip"); ok { + req.ByIP = byIp.(string) + } + if rgId, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rgId.(int)) + } + if byID, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(byID.(int)) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); 
ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityFlipgroupListCheckPresence: load flipgroup list") + flipgroupList, err := c.CloudBroker().FLIPGroup().List(ctx, req) + if err != nil { + return nil, err + } + + return flipgroupList, nil +} diff --git a/internal/service/cloudbroker/flipgroup/utility_flipgroup_resource.go b/internal/service/cloudbroker/flipgroup/utility_flipgroup_resource.go new file mode 100644 index 0000000..07f86da --- /dev/null +++ b/internal/service/cloudbroker/flipgroup/utility_flipgroup_resource.go @@ -0,0 +1,127 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Tim Tkachev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package flipgroup + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" +) + +func addComputesAfterCreation(ctx context.Context, warnings *dc.Warnings, c *controller.ControllerCfg, compute_ids []interface{}, flipgroupID uint64) { + if len(compute_ids) == 0 { + return + } + + log.Debugf("Adding %v computes to flipgroup [id=%v]...", len(compute_ids), flipgroupID) + + for _, elem := range compute_ids { + compute_id := uint64(elem.(int)) + req := flipgroup.ComputeAddRequest{ + FLIPGroupID: flipgroupID, + ComputeID: compute_id, + } + + _, err := c.CloudBroker().FLIPGroup().ComputeAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + } +} + +func handleClientIDsUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, fg *flipgroup.RecordFLIPGroup, warn *dc.Warnings) { + addedClients := make([]interface{}, 0) + removedClients := make([]interface{}, 0) + + old_set, new_set := d.GetChange("client_ids") + oldSlice := old_set.([]interface{}) + newSlice := new_set.([]interface{}) + + for _, oldElem := range oldSlice { + if !containsClient(newSlice, oldElem) { + removedClients = append(removedClients, oldElem) + } + } + + for _, newElem := range newSlice { + if !containsClient(oldSlice, newElem) { + addedClients = append(addedClients, newElem) + } + } + + log.Debugf("Found client_ids change with %v deletion(s) and %v addition(s) [flipgroupID=%v]", len(removedClients), len(addedClients), fg.ID) + + if len(addedClients) > 0 { + for _, id := range addedClients { + req := flipgroup.ComputeAddRequest{ + FLIPGroupID: fg.ID, + ComputeID: uint64(id.(int)), + } + + if _, 
err := c.CloudBroker().FLIPGroup().ComputeAdd(ctx, req); err != nil { + warn.Add(err) + } + } + } + + if len(removedClients) > 0 { + for _, id := range removedClients { + req := flipgroup.ComputeRemoveRequest{ + FLIPGroupID: fg.ID, + ComputeID: uint64(id.(int)), + } + + if _, err := c.CloudBroker().FLIPGroup().ComputeRemove(ctx, req); err != nil { + warn.Add(err) + } + } + } +} + +func containsClient(set []interface{}, check interface{}) bool { + for _, elem := range set { + elemConv := elem.(int) + checkConv := check.(int) + if elemConv == checkConv { + return true + } + } + + return false +} diff --git a/internal/service/cloudbroker/grid/data_source_grid.go b/internal/service/cloudbroker/grid/data_source_grid.go index c890d75..f66adbb 100644 --- a/internal/service/cloudbroker/grid/data_source_grid.go +++ b/internal/service/cloudbroker/grid/data_source_grid.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go b/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go new file mode 100644 index 0000000..022d615 --- /dev/null +++ b/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go @@ -0,0 +1,117 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridGetDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diagnosis, err := utilityGridGetDiagnosisCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + d.SetId(d.Id()) + d.Set("diagnosis", diagnosis) + return nil +} + +func dataSourceGridGetDiagnosisSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "gid": { + Type: schema.TypeInt, + Required: true, + }, + "diagnosis": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func DataSourceGridGetDiagnosis() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridGetDiagnosisRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridGetDiagnosisSchemaMake(), + } +} + +func dataSourceGridPostDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diagnosis, err := utilityGridPostDiagnosisCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + d.SetId(d.Id()) + d.Set("diagnosis", diagnosis) + return nil +} + +func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema { 
+ return map[string]*schema.Schema{ + "gid": { + Type: schema.TypeInt, + Required: true, + }, + "diagnosis": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func DataSourceGridPostDiagnosis() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridPostDiagnosisRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridPostDiagnosisSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go b/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go new file mode 100644 index 0000000..4596a8c --- /dev/null +++ b/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go @@ -0,0 +1,112 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + grid, err := utilityGridGetStatusCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("status", grid) + return nil +} + +func dataSourceGridGetStatusSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "status": { + Type: schema.TypeBool, + Computed: true, + }, + } +} + +func DataSourceGridGetStatus() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridGetStatusRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridGetStatusSchemaMake(), + } +} + +func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + grid, err := utilityGridPostStatusCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("status", grid) + return nil +} + +func dataSourceGridPostStatusSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "status": { + Type: schema.TypeBool, + Computed: true, + }, + } +} + +func DataSourceGridPostStatus() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridPostStatusRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridPostStatusSchemaMake(), + } +} diff --git 
a/internal/service/cloudbroker/grid/data_source_grid_list.go b/internal/service/cloudbroker/grid/data_source_grid_list.go index 44ab8aa..2a9854b 100644 --- a/internal/service/cloudbroker/grid/data_source_grid_list.go +++ b/internal/service/cloudbroker/grid/data_source_grid_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -48,12 +48,22 @@ func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m inter id := uuid.New() d.SetId(id.String()) d.Set("items", flattenGridList(gridList)) - + d.Set("entry_count", gridList.EntryCount) return nil } func dataSourceGridListSchemaMake() map[string]*schema.Schema { rets := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "by id", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "name", + }, "page": { Type: schema.TypeInt, Optional: true, @@ -72,6 +82,11 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema { Schema: dataSourceGridSchemaMake(), }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry count", + }, } return rets @@ -79,6 +94,134 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema { func dataSourceGridSchemaMake() map[string]*schema.Schema { return map[string]*schema.Schema{ + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "current": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: 
schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "flag": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/grid/data_source_grid_list_emails.go b/internal/service/cloudbroker/grid/data_source_grid_list_emails.go new file mode 100644 index 0000000..35424b7 --- /dev/null +++ b/internal/service/cloudbroker/grid/data_source_grid_list_emails.go @@ -0,0 +1,99 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + gridListEmails, err := utilityGridListEmailsCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", gridListEmails.Data) + d.Set("entry_count", gridListEmails.EntryCount) + return nil +} + +func dataSourceGridListEmailsSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "page size", + }, + "items": { + Type: schema.TypeList, + Computed: true, 
+ Description: "grid list emails", + Elem: &schema.Schema { + Type: schema.TypeString, + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry count", + }, + } + + return rets +} + +func DataSourceGridListEmails() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridListEmailsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridListEmailsSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go b/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go new file mode 100644 index 0000000..ab0600d --- /dev/null +++ b/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go @@ -0,0 +1,197 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridGetConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + gridGetConsumption, err := utilityGridGetConsumptionCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + d.SetId(strconv.FormatUint(gridGetConsumption.GID, 10)) + d.Set("consumed", flattenGridRecordResource(gridGetConsumption.Consumed)) + d.Set("reserved", flattenGridRecordResource(gridGetConsumption.Reserved)) + return nil +} + +func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "grid_id": { + Type: schema.TypeInt, + Required: true, + }, + "consumed": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + } +} + +func DataSourceGridGetConsumption() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridGetConsumptionRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridGetConsumptionSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go b/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go new file mode 100644 index 0000000..92fa364 --- /dev/null +++ b/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go @@ -0,0 +1,217 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + gridListConsumption, err := utilityGridListConsumptionCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenGridListConsumption(gridListConsumption)) + d.Set("entry_count", gridListConsumption.EntryCount) + return nil +} + +func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "items": { + Type: schema.TypeList, + Computed: true, + Description: "grid list consumption", + Elem: &schema.Resource{ + Schema: dataSourceGridConsumptionSchemaMake(), + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry count", + }, + } + return rets +} + +func dataSourceGridConsumptionSchemaMake() map[string]*schema.Schema 
{ + return map[string]*schema.Schema{ + "consumed": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_ips": { + Type: schema.TypeInt, + Computed: true, + }, + "ext_traffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "id": { + 
Type: schema.TypeInt, + Computed: true, + }, + } +} + +func DataSourceGridListConsumption() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridListConsumptionRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridListConsumptionSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/flattens.go b/internal/service/cloudbroker/grid/flattens.go index e980c35..c092516 100644 --- a/internal/service/cloudbroker/grid/flattens.go +++ b/internal/service/cloudbroker/grid/flattens.go @@ -15,9 +15,10 @@ func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) { } func flattenGridList(gl *grid.ListGrids) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(gl.Data)) for _, item := range gl.Data { temp := map[string]interface{}{ + "resources": flattenGridResources(item.Resources), "name": item.Name, "flag": item.Flag, "gid": item.GID, @@ -25,8 +26,62 @@ func flattenGridList(gl *grid.ListGrids) []map[string]interface{} { "location_code": item.LocationCode, "id": item.ID, } + res = append(res, temp) + } + return res +} +func flattenGridListConsumption(gl *grid.ListResourceConsumption) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(gl.Data)) + for _, item := range gl.Data { + temp := map[string]interface{}{ + "consumed": flattenGridRecordResource(item.Consumed), + "reserved": flattenGridRecordResource(item.Reserved), + "id": item.GID, + } res = append(res, temp) } return res } + +func flattenGridResources(r grid.Resources) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "current": flattenGridRecordResource(r.Current), + "reserved": flattenGridRecordResource(r.Reserved), + } + res = append(res, temp) + return res +} + +func flattenGridRecordResource(rr grid.RecordResource) 
[]map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "cpu": rr.CPU, + "disk_size": rr.DiskSize, + "disk_size_max": rr.DiskSizeMax, + "ext_ips": rr.ExtIPs, + "ext_traffic": rr.ExtTraffic, + "gpu": rr.GPU, + "ram": rr.RAM, + "seps": flattenGridSeps(rr.SEPs), + } + res = append(res, temp) + return res +} + +func flattenGridSeps(seps map[string]map[string]grid.DiskUsage) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for sepKey, sepVal := range seps { + for dataKey, dataVal := range sepVal { + temp := map[string]interface{}{ + "sep_id": sepKey, + "data_name": dataKey, + "disk_size": dataVal.DiskSize, + "disk_size_max": dataVal.DiskSizeMax, + } + res = append(res, temp) + } + } + return res +} diff --git a/internal/service/cloudbroker/grid/utility_grid.go b/internal/service/cloudbroker/grid/utility_grid.go index f30a985..1a8c1d4 100644 --- a/internal/service/cloudbroker/grid/utility_grid.go +++ b/internal/service/cloudbroker/grid/utility_grid.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -33,7 +33,7 @@ package grid import ( "context" - "errors" + "strconv" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" @@ -46,10 +46,11 @@ func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m int c := m.(*controller.ControllerCfg) req := grid.GetRequest{} - if gridId, ok := d.GetOk("grid_id"); ok { - req.GID = uint64(gridId.(int)) + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.GID = id } else { - return nil, errors.New("grid_id is required") + req.GID = uint64(d.Get("grid_id").(int)) } log.Debugf("utilityGridCheckPresence: load grid") diff --git a/internal/service/cloudbroker/kvmvm/api.go b/internal/service/cloudbroker/grid/utility_grid_get_consumption.go similarity index 51% rename from internal/service/cloudbroker/kvmvm/api.go rename to internal/service/cloudbroker/grid/utility_grid_get_consumption.go index f7d4857..c4b45fa 100644 --- a/internal/service/cloudbroker/kvmvm/api.go +++ b/internal/service/cloudbroker/grid/utility_grid_get_consumption.go @@ -1,46 +1,64 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. 
- -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package kvmvm - -const KvmX86CreateAPI = "/restmachine/cloudbroker/kvmx86/create" -const KvmPPCCreateAPI = "/restmachine/cloudbroker/kvmppc/create" -const ComputeGetAPI = "/restmachine/cloudbroker/compute/get" -const RgListComputesAPI = "/restmachine/cloudbroker/rg/listComputes" -const ComputeNetAttachAPI = "/restmachine/cloudbroker/compute/netAttach" -const ComputeNetDetachAPI = "/restmachine/cloudbroker/compute/netDetach" -const ComputeDiskAttachAPI = "/restmachine/cloudbroker/compute/diskAttach" -const ComputeDiskDetachAPI = "/restmachine/cloudbroker/compute/diskDetach" -const ComputeStartAPI = "/restmachine/cloudbroker/compute/start" -const ComputeStopAPI = "/restmachine/cloudbroker/compute/stop" -const ComputeResizeAPI = "/restmachine/cloudbroker/compute/resize" -const DisksResizeAPI = "/restmachine/cloudbroker/disks/resize2" -const ComputeDeleteAPI = "/restmachine/cloudbroker/compute/delete" +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridGetConsumptionCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordResourcesConsumption, error) { + c := m.(*controller.ControllerCfg) + req := grid.GetResourceConsumptionRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.GridID = id + } else { + req.GridID = uint64(d.Get("grid_id").(int)) + } + + log.Debugf("utilityGridGetConsumptionCheckPresence: load specific grid") + gridGetConsumption, err := c.CloudBroker().Grid().GetResourceConsumption(ctx,req) + if err != nil { + return nil, err + } + + return gridGetConsumption, nil +} diff --git a/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go b/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go new file mode 100644 index 0000000..1cd1d62 --- /dev/null +++ b/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go @@ -0,0 +1,84 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridGetDiagnosisCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { + c := m.(*controller.ControllerCfg) + req := grid.GetDiagnosisRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.GID = id + } else { + req.GID = uint64(d.Get("gid").(int)) + } + + log.Debugf("utilityGridGetDiagnosisCheckPresence: load grid diagnosis") + gridGetDiagnosis, err := c.CloudBroker().Grid().GetDiagnosisGET(ctx, req) + if err != nil { + return "", err + } + + return gridGetDiagnosis, nil +} + +func utilityGridPostDiagnosisCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { + c := m.(*controller.ControllerCfg) + req := grid.GetDiagnosisRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.GID = 
uint64(d.Get("gid").(int)) + } + + log.Debugf("utilityGridPostDiagnosisCheckPresence: load grid diagnosis") + gridPostDiagnosis, err := c.CloudBroker().Grid().GetDiagnosis(ctx, req) + if err != nil { + return "", err + } + + return gridPostDiagnosis, nil +} diff --git a/internal/service/cloudbroker/grid/utility_grid_get_post_status.go b/internal/service/cloudbroker/grid/utility_grid_get_post_status.go new file mode 100644 index 0000000..118ab2e --- /dev/null +++ b/internal/service/cloudbroker/grid/utility_grid_get_post_status.go @@ -0,0 +1,66 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridGetStatusCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + log.Debugf("utilityGridGetStatusCheckPresence: load grid status") + gridGetStatus, err := c.CloudBroker().Grid().StatusGET(ctx) + if err != nil { + return false, err + } + + return gridGetStatus, nil +} + +func utilityGridPostStatusCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + log.Debugf("utilityGridPostStatusCheckPresence: load grid status") + gridGetStatus, err := c.CloudBroker().Grid().Status(ctx) + if err != nil { + return false, err + } + + return gridGetStatus, nil +} diff --git a/internal/service/cloudbroker/grid/utility_grid_list.go b/internal/service/cloudbroker/grid/utility_grid_list.go index 0bde99c..d6503b8 100644 --- a/internal/service/cloudbroker/grid/utility_grid_list.go +++ b/internal/service/cloudbroker/grid/utility_grid_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -45,6 +45,12 @@ func utilityGridListCheckPresence(ctx context.Context, d *schema.ResourceData, m c := m.(*controller.ControllerCfg) req := grid.ListRequest{} + if byID, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(byID.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } if page, ok := d.GetOk("page"); ok { req.Page = uint64(page.(int)) } diff --git a/internal/service/cloudbroker/grid/utility_grid_list_emails.go b/internal/service/cloudbroker/grid/utility_grid_list_emails.go new file mode 100644 index 0000000..d3466e6 --- /dev/null +++ b/internal/service/cloudbroker/grid/utility_grid_list_emails.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridListEmailsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.ListEmails, error) { + c := m.(*controller.ControllerCfg) + req := grid.ListEmailsRequest{} + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityGridListEmailsCheckPresence: load grid list of emails") + gridListEmails, err := c.CloudBroker().Grid().ListEmails(ctx, req) + if err != nil { + return nil, err + } + + return gridListEmails, nil +} diff --git a/internal/service/cloudbroker/grid/utulity_grid_list_consumption.go b/internal/service/cloudbroker/grid/utulity_grid_list_consumption.go new file mode 100644 index 0000000..ad86209 --- /dev/null +++ b/internal/service/cloudbroker/grid/utulity_grid_list_consumption.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridListConsumptionCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.ListResourceConsumption, error) { + c := m.(*controller.ControllerCfg) + + log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption") + gridListConsumption, err := c.CloudBroker().Grid().ListResourceConsumption(ctx) + if err != nil { + return nil, err + } + + return gridListConsumption, nil +} diff --git a/internal/service/cloudbroker/ic/input_checks.go b/internal/service/cloudbroker/ic/input_checks.go new file mode 100644 index 0000000..6fd78b8 --- /dev/null +++ b/internal/service/cloudbroker/ic/input_checks.go @@ -0,0 +1,162 @@ +// Input checks +package ic + +import ( + "context" + "fmt" + + cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" + cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + 
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func ExistRG(ctx context.Context, rgId uint64, c *controller.ControllerCfg) error { + req := cb_rg.ListRequest{ + ByID: rgId, + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return err + } + + if len(rgList.Data) == 0 { + return fmt.Errorf("RG with id %v not found", rgId) + } + + return nil +} + +func ExistImage(ctx context.Context, imageId uint64, c *controller.ControllerCfg) error { + req := cb_image.ListRequest{ + ByID: imageId, + } + + listImages, err := c.CloudBroker().Image().List(ctx, req) + if err != nil { + return err + } + + if len(listImages.Data) == 0 { + return fmt.Errorf("image with id %v not found", imageId) + } + + return nil +} + +func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error { + var errs []error + + req := cb_vins.ListRequest{ + IncludeDeleted: false, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + errs = append(errs, err) + return errs + } + + for _, vinsId := range vinsIds { + found := false + + for _, vins := range vinsList.Data { + if vinsId == vins.ID { + found = true + break + } + } + + if !found { + errs = append(errs, fmt.Errorf("VINS with ID %v not found", vinsId)) + } + } + + return errs +} + +func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.ControllerCfg) []error { + var errs []error + + req := cb_extnet.ListRequest{} + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + errs = append(errs, err) + return errs + } + + for _, extNetId := range extNetIds { + found := false + + for _, extNet := range extNetList.Data { + if extNetId == extNet.ID { + found = true + break + } + } + + if !found { + errs = append(errs, fmt.Errorf("EXTNET with ID %v not found", extNetId)) + } + } + + return errs +} + +func ExistExtNet(ctx context.Context, extNetId uint64, c 
*controller.ControllerCfg) error { + + req := cb_extnet.ListRequest{ + ByID: extNetId, + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + +func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) error { + req := cb_gid.ListRequest{} + + gridList, err := c.CloudBroker().Grid().List(ctx, req) + if err != nil { + return err + } + + for _, grid := range gridList.Data { + if grid.GID == gid { + return nil + } + } + + return fmt.Errorf("GID with id %v not found", gid) +} + +func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg) error { + req := cb_stack.ListRequest{ + ByID: stackId, + } + + stackList, err := c.CloudBroker().Stack().List(ctx, req) + if err != nil { + return err + } + + if len(stackList.Data) == 0 { + return fmt.Errorf("stack with id %v not found", stackId) + } + + return nil +} diff --git a/internal/service/cloudbroker/kvmvm/data_source_compute.go b/internal/service/cloudbroker/kvmvm/data_source_compute.go index 030416e..0d58e35 100644 --- a/internal/service/cloudbroker/kvmvm/data_source_compute.go +++ b/internal/service/cloudbroker/kvmvm/data_source_compute.go @@ -76,17 +76,6 @@ func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} { return result } -func findBootDisk(disks compute.ListDisks) *compute.ItemDisk { - for _, d := range disks { - if d.Type == "B" { - return &d - } - } - - // some computes don't have a boot disk, so... 
- return &compute.ItemDisk{} -} - // Parse the list of interfaces from compute/get response into a list of networks // attached to this compute func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} { @@ -113,7 +102,7 @@ func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface return result } -func flattenCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error { +func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error { // This function expects that compFacts string contains response from API compute/get, // i.e. detailed information about compute instance. // @@ -179,7 +168,7 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } - if err = flattenCompute(d, compFacts); err != nil { + if err = flattenDataCompute(d, compFacts); err != nil { return diag.FromErr(err) } @@ -297,15 +286,15 @@ func DataSourceCompute() *schema.Resource { }, */ - "network": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxNetworksPerCompute, - Elem: &schema.Resource{ - Schema: networkSubresourceSchemaMake(), - }, - Description: "Network connection(s) for this compute.", - }, + // "network": { + // Type: schema.TypeSet, + // Optional: true, + // MaxItems: constants.MaxNetworksPerCompute, + // Elem: &schema.Resource{ + // Schema: networkSubresourceSchemaMake(), + // }, + // Description: "Network connection(s) for this compute.", + // }, "os_users": { Type: schema.TypeList, diff --git a/internal/service/cloudbroker/kvmvm/flattens.go b/internal/service/cloudbroker/kvmvm/flattens.go new file mode 100644 index 0000000..fcf0ede --- /dev/null +++ b/internal/service/cloudbroker/kvmvm/flattens.go @@ -0,0 +1,219 @@ +package kvmvm + +import ( + "encoding/json" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" +) + +func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error { + log.Debugf("flattenCompute: ID %d, RG ID %d", computeRec.ID, computeRec.RGID) + + customFields, _ := json.Marshal(computeRec.CustomFields) + devices, _ := json.Marshal(computeRec.Devices) + + bootDisk := findBootDisk(computeRec.Disks) + + d.Set("account_id", computeRec.AccountID) + d.Set("account_name", computeRec.AccountName) + d.Set("affinity_label", computeRec.AffinityLabel) + d.Set("affinity_weight", computeRec.AffinityWeight) + d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules)) + d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules)) + d.Set("arch", computeRec.Arch) + d.Set("boot_order", computeRec.BootOrder) + d.Set("boot_disk_size", computeRec.BootDiskSize) + d.Set("clone_reference", computeRec.CloneReference) + d.Set("clones", computeRec.Clones) + d.Set("computeci_id", computeRec.ComputeCIID) + d.Set("created_by", computeRec.CreatedBy) + d.Set("created_time", computeRec.CreatedTime) + d.Set("custom_fields", string(customFields)) + d.Set("deleted_by", computeRec.DeletedBy) + d.Set("deleted_time", computeRec.DeletedTime) + d.Set("description", computeRec.Description) + d.Set("devices", string(devices)) + d.Set("disks", + flattenComputeDisks( + computeRec.Disks, + d.Get("extra_disks").(*schema.Set).List(), + bootDisk.ID, + ), + ) + d.Set("gid", computeRec.GID) + d.Set("guid", computeRec.GUID) + d.Set("compute_id", computeRec.ID) + d.Set("image_id", computeRec.ImageID) + d.Set("interfaces", flattenInterfaces(computeRec.Interfaces)) + d.Set("lock_status", computeRec.LockStatus) + d.Set("manager_id", computeRec.ManagerID) + d.Set("manager_type", computeRec.ManagerType) + d.Set("migrationjob", computeRec.MigrationJob) + d.Set("milestones", computeRec.Milestones) + d.Set("os_users", flattenOSUsers(computeRec.OSUsers)) + d.Set("pinned", 
computeRec.Pinned) + d.Set("reference_id", computeRec.ReferenceID) + d.Set("registered", computeRec.Registered) + d.Set("res_name", computeRec.ResName) + d.Set("rg_name", computeRec.RGName) + d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets)) + d.Set("stack_id", computeRec.StackID) + d.Set("stack_name", computeRec.StackName) + d.Set("stateless_sep_id", computeRec.StatelessSEPID) + d.Set("stateless_sep_type", computeRec.StatelessSEPType) + d.Set("status", computeRec.Status) + d.Set("tags", flattenTags(computeRec.Tags)) + d.Set("tech_status", computeRec.TechStatus) + d.Set("updated_by", computeRec.UpdatedBy) + d.Set("updated_time", computeRec.UpdatedTime) + d.Set("user_managed", computeRec.UserManaged) + d.Set("vgpus", computeRec.VGPUs) + d.Set("virtual_image_id", computeRec.VirtualImageID) + + return nil +} + +func flattenTags(tags map[string]interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(tags)) + + for k, v := range tags { + res = append(res, map[string]interface{}{ + "key": k, + "val": v, + }) + } + + return res +} + +func flattenSnapSets(snaps compute.ListSnapshots) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(snaps)) + + for _, snap := range snaps { + res = append(res, map[string]interface{}{ + "disks": snap.Disks, + "guid": snap.GUID, + "label": snap.Label, + "timestamp": snap.Timestamp, + }) + } + + return res +} + +func flattenOSUsers(users compute.ListOSUsers) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(users)) + + for _, user := range users { + res = append(res, map[string]interface{}{ + "guid": user.GUID, + "login": user.Login, + "password": user.Password, + "public_key": user.PubKey, + }) + } + + return res +} + +func flattenInterfaces(ifaces compute.ListInterfaces) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(ifaces)) + + for _, iface := range ifaces { + res = append(res, map[string]interface{}{ + "conn_id": iface.ConnID, 
+ "conn_type": iface.ConnType, + "def_gw": iface.DefGW, + "flip_group_id": iface.FLIPGroupID, + "guid": iface.GUID, + "ip_address": iface.IPAddress, + "listen_ssh": iface.ListenSSH, + "mac": iface.MAC, + "name": iface.Name, + "net_id": iface.NetID, + "netmask": iface.NetMask, + "net_type": iface.NetType, + "pci_slot": iface.PCISlot, + "qos": flattenQOS(iface.QOS), + "target": iface.Target, + "type": iface.Type, + "vnfs": iface.VNFs, + }) + } + + return res +} + +func flattenQOS(qos compute.QOS) []map[string]interface{} { + return []map[string]interface{}{ + { + "e_rate": qos.ERate, + "guid": qos.GUID, + "in_brust": qos.InBurst, + "in_rate": qos.InRate, + }, + } +} + +func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(disksList)) + for _, disk := range disksList { + if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks + continue + } + temp := map[string]interface{}{ + "disk_name": disk.Name, + "size": disk.SizeMax, + "sep_id": disk.SEPID, + "disk_type": disk.Type, + "pool": disk.Pool, + "desc": disk.Description, + "image_id": disk.ImageID, + "disk_id": disk.ID, + "shareable": disk.Shareable, + "size_used": disk.SizeUsed, + } + res = append(res, temp) + } + sort.Slice(res, func(i, j int) bool { + return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64) + }) + return res +} + +func findInExtraDisks(diskId uint, extraDisks []interface{}) bool { + for _, ExtraDisk := range extraDisks { + if diskId == uint(ExtraDisk.(int)) { + return true + } + } + return false +} + +func findBootDisk(disks compute.ListDisks) *compute.ItemDisk { + for _, disk := range disks { + if disk.Type == "B" { + return &disk + } + } + return nil +} + +func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(rules)) + + for _, rule := range rules { + 
res = append(res, map[string]interface{}{ + "topology": rule.Topology, + "policy": rule.Policy, + "mode": rule.Mode, + "key": rule.Key, + "value": rule.Value, + }) + } + + return res +} diff --git a/internal/service/cloudbroker/kvmvm/models.go b/internal/service/cloudbroker/kvmvm/models.go deleted file mode 100644 index ecbe06e..0000000 --- a/internal/service/cloudbroker/kvmvm/models.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package kvmvm - -type DiskRecord struct { - Acl map[string]interface{} `json:"acl"` - AccountID int `json:"accountId"` - AccountName string `json:"accountName"` - BootPartition int `json:"bootPartition"` - CreatedTime uint64 `json:"creationTime"` - ComputeID int `json:"computeId"` - ComputeName string `json:"computeName"` - DeletedTime uint64 `json:"deletionTime"` - DeviceName string `json:"devicename"` - Desc string `json:"desc"` - DestructionTime uint64 `json:"destructionTime"` - DiskPath string `json:"diskPath"` - GridID int `json:"gid"` - GUID int `json:"guid"` - ID uint `json:"id"` - ImageID int `json:"imageId"` - Images []int `json:"images"` - IOTune map[string]interface{} `json:"iotune"` - IQN string `json:"iqn"` - Login string `json:"login"` - Name string `json:"name"` - MachineId int `json:"machineId"` - MachineName string `json:"machineName"` - Milestones uint64 `json:"milestones"` - Order int `json:"order"` - Params string `json:"params"` - Passwd string `json:"passwd"` - ParentId int `json:"parentId"` - PciSlot int `json:"pciSlot"` - Pool string `json:"pool"` - PurgeTime uint64 `json:"purgeTime"` - PurgeAttempts uint64 `json:"purgeAttempts"` - RealityDeviceNumber int `json:"realityDeviceNumber"` - ReferenceId string `json:"referenceId"` - ResID string `json:"resId"` - ResName string `json:"resName"` - Role string `json:"role"` - SepType string `json:"sepType"` - SepID int `json:"sepId"` // NOTE: absent from compute/get output - SizeMax int `json:"sizeMax"` - SizeUsed float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space - Snapshots []SnapshotRecord `json:"snapshots"` - Status string `json:"status"` - TechStatus string `json:"techStatus"` - Type string `json:"type"` - UpdateBy uint64 `json:"updateBy"` - VMID int `json:"vmid"` -} - -type InterfaceRecord struct { - ConnID int `json:"connId"` // This is VLAN ID or VxLAN ID, 
depending on ConnType - ConnType string `json:"connType"` // Either "VLAN" or "VXLAN" tag - DefaultGW string `json:"defGw"` - Guid string `json:"guid"` - IPAddress string `json:"ipAddress"` // without trailing network mask, i.e. "192.168.1.3" - MAC string `json:"mac"` - Name string `json:"name"` - NetID int `json:"netId"` // This is either ExtNet ID or ViNS ID, depending on NetType - NetMask int `json:"netMask"` - NetType string `json:"netType"` // Either "EXTNET" or "VINS" tag - PciSlot int `json:"pciSlot"` - Target string `json:"target"` - Type string `json:"type"` - VNFs []int `json:"vnfs"` - QOS InterfaceQosRecord `json:"qos"` -} - -type InterfaceQosRecord struct { - ERate int `json:"eRate"` - Guid string `json:"guid"` - InBurst int `json:"inBurst"` - InRate int `json:"inRate"` -} - -type SnapshotRecord struct { - Guid string `json:"guid"` - Label string `json:"label"` - ResId string `json:"resId"` - SnapSetGuid string `json:"snapSetGuid"` - SnapSetTime uint64 `json:"snapSetTime"` - TimeStamp uint64 `json:"timestamp"` -} - -type SnapshotRecordList []SnapshotRecord - -type ComputeGetResp struct { - // ACLs `json:"ACL"` - it is a dictionary, special parsing required - AccountID int `json:"accountId"` - AccountName string `json:"accountName"` - Arch string `json:"arch"` - BootDiskSize int `json:"bootdiskSize"` - CloneReference int `json:"cloneReference"` - Clones []int `json:"clones"` - Cpu int `json:"cpus"` - Desc string `json:"desc"` - Disks []DiskRecord `json:"disks"` - Driver string `json:"driver"` - GridID int `json:"gid"` - ID uint `json:"id"` - ImageID int `json:"imageId"` - ImageName string `json:"imageName"` - Interfaces []InterfaceRecord `json:"interfaces"` - LockStatus string `json:"lockStatus"` - ManagerID int `json:"managerId"` - ManagerType string `json:"manageType"` - Name string `json:"name"` - NatableVinsID int `json:"natableVinsId"` - NatableVinsIP string `json:"natableVinsIp"` - NatableVinsName string `json:"natableVinsName"` - NatableVinsNet 
string `json:"natableVinsNetwork"` - NatableVinsNetName string `json:"natableVinsNetworkName"` - OsUsers []OsUserRecord `json:"osUsers"` - Ram int `json:"ram"` - RgID int `json:"rgId"` - RgName string `json:"rgName"` - SnapSets []SnapSetRecord `json:"snapSets"` - Status string `json:"status"` - // Tags []string `json:"tags"` // Tags were reworked since DECORT 3.7.1 - TechStatus string `json:"techStatus"` - TotalDiskSize int `json:"totalDiskSize"` - UpdatedBy string `json:"updatedBy"` - UpdateTime uint64 `json:"updateTime"` - UserManaged bool `json:"userManaged"` - Vgpus []int `json:"vgpus"` - VinsConnected int `json:"vinsConnected"` - VirtualImageID int `json:"virtualImageId"` -} - -type OsUserRecord struct { - Guid string `json:"guid"` - Login string `json:"login"` - Password string `json:"password"` - PubKey string `json:"pubkey"` -} - -type SnapSetRecord struct { - Disks []int `json:"disks"` - Guid string `json:"guid"` - Label string `json:"label"` - TimeStamp uint64 `json:"timestamp"` -} - -type ComputeBriefRecord struct { // this is a brief compute specifiaction as returned by API rg/listComputes - // we do not even include here all fields as returned by this API, but only the most important that - // are really necessary to identify and distinguish computes - AccountID int `json:"accountId"` - AccountName string `json:"accountName"` - Name string `json:"name"` - ID uint `json:"id"` - RgID int `json:"rgId"` - RgName string `json:"rgName"` - Status string `json:"status"` - TechStatus string `json:"techStatus"` -} - -type RgListComputesResp []ComputeBriefRecord diff --git a/internal/service/cloudbroker/kvmvm/network_subresource.go b/internal/service/cloudbroker/kvmvm/network_subresource.go deleted file mode 100644 index 7068e7f..0000000 --- a/internal/service/cloudbroker/kvmvm/network_subresource.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
-Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package kvmvm - -import ( - "bytes" - "hash/fnv" - - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs" - log "github.com/sirupsen/logrus" - - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -// This is subresource of compute resource used when creating/managing compute network connections - -func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { - if newVal != "" && newVal != oldVal { - log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) - return false - } - log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) - return true // suppress difference -} - -// This function is based on the original Terraform SerializeResourceForHash found -// in helper/schema/serialize.go -// It skips network subresource 
attributes, which are irrelevant for identification -// of unique network blocks -func networkSubresourceSerialize(output *bytes.Buffer, val interface{}, resource *schema.Resource) { - if val == nil { - return - } - - rs := resource.Schema - m := val.(map[string]interface{}) - - keys := make([]string, 0, len(rs)) - allComputed := true - - for k, val := range rs { - if val.Optional || val.Required { - allComputed = false - } - - keys = append(keys, k) - } - - sort.Strings(keys) - for _, k := range keys { - // explicitly ignore "ip_address" when hashing - if k == "ip_address" { - continue - } - - subSchema := rs[k] - // Skip attributes that are not user-provided. Computed attributes - // do not contribute to the hash since their ultimate value cannot - // be known at plan/diff time. - if !allComputed && !(subSchema.Required || subSchema.Optional) { - continue - } - - output.WriteString(k) - output.WriteRune(':') - value := m[k] - schema.SerializeValueForHash(output, value, subSchema) - } -} - -// HashNetworkSubresource hashes network subresource of compute resource. It uses -// specially designed networkSubresourceSerialize (see above) to make sure hashing -// does not involve attributes that we deem irrelevant to the uniqueness of network -// subresource definitions. -// It is this function that should be specified as SchemaSetFunc when creating Set -// from network subresource (e.g. 
in flattenCompute) -// -// This function is based on the original Terraform function HashResource from -// helper/schema/set.go -func HashNetworkSubresource(resource *schema.Resource) schema.SchemaSetFunc { - return func(v interface{}) int { - var serialized bytes.Buffer - networkSubresourceSerialize(&serialized, v, resource) - - hs := fnv.New32a() - hs.Write(serialized.Bytes()) - return int(hs.Sum32()) - } -} - -func networkSubresourceSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "net_type": { - Type: schema.TypeString, - Required: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, false), // observe case while validating - Description: "Type of the network for this connection, either EXTNET or VINS.", - }, - - "net_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the network for this connection.", - }, - - "ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: networkSubresIPAddreDiffSupperss, - Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.", - }, - - "mac": { - Type: schema.TypeString, - Computed: true, - Description: "MAC address associated with this connection. 
MAC address is assigned automatically.", - }, - } - return rets -} diff --git a/internal/service/cloudbroker/kvmvm/resource_check_input_values.go b/internal/service/cloudbroker/kvmvm/resource_check_input_values.go new file mode 100644 index 0000000..71f8e8e --- /dev/null +++ b/internal/service/cloudbroker/kvmvm/resource_check_input_values.go @@ -0,0 +1,63 @@ +package kvmvm + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" +) + +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + errs := []error{} + + if err := ic.ExistRG(ctx, uint64(d.Get("rg_id").(int)), c); err != nil { + errs = append(errs, err) + } + + if err := ic.ExistImage(ctx, uint64(d.Get("image_id").(int)), c); err != nil { + errs = append(errs, err) + } + + if netErrs := existNetworks(ctx, d, c); netErrs != nil { + errs = append(errs, netErrs...) + } + + return dc.ErrorsToDiagnostics(errs) +} + +func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) []error { + var errs []error + var vinsIds, extNetIds []uint64 + + networksIface, ok := d.GetOk("network") + if !ok { + return nil + } + + networkList := networksIface.(*schema.Set).List() + for _, elem := range networkList { + network := elem.(map[string]interface{}) + + switch network["net_type"].(string) { + case "VINS": + vinsIds = append(vinsIds, uint64(network["net_id"].(int))) + case "EXTNET": + extNetIds = append(extNetIds, uint64(network["net_id"].(int))) + default: + continue + } + } + + if vinsErrs := ic.ExistVinses(ctx, vinsIds, c); vinsErrs != nil { + errs = append(errs, vinsErrs...) 
+ } + + if extNetErrs := ic.ExistExtNets(ctx, extNetIds, c); extNetErrs != nil { + errs = append(errs, extNetErrs...) + } + + return errs +} diff --git a/internal/service/cloudbroker/kvmvm/resource_compute.go b/internal/service/cloudbroker/kvmvm/resource_compute.go index ef728f4..997697b 100644 --- a/internal/service/cloudbroker/kvmvm/resource_compute.go +++ b/internal/service/cloudbroker/kvmvm/resource_compute.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Nikita Sorokin, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,107 +35,154 @@ package kvmvm import ( "context" "strconv" + "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmppc" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmx86" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" ) -func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { - if oldVal == "" && newVal != "applied" { - 
log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) - return false - } - log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) - return true -} - func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int)) - c := m.(*controller.ControllerCfg) - reqPPC := kvmppc.CreateRequest{} - reqX86 := kvmx86.CreateRequest{} - - reqPPC.RGID = uint64(d.Get("rg_id").(int)) - reqX86.RGID = uint64(d.Get("rg_id").(int)) - reqPPC.Name = d.Get("name").(string) - reqX86.Name = d.Get("name").(string) + createReqX86 := kvmx86.CreateRequest{} + createReqPPC := kvmppc.CreateRequest{} - reqPPC.CPU = uint64(d.Get("cpu").(int)) - reqPPC.RAM = uint64(d.Get("ram").(int)) - reqPPC.ImageID = uint64(d.Get("image_id").(int)) - reqPPC.BootDisk = uint64(d.Get("boot_disk_size").(int)) - reqPPC.Start = false - - reqX86.CPU = uint64(d.Get("cpu").(int)) - reqX86.RAM = uint64(d.Get("ram").(int)) - reqX86.ImageID = uint64(d.Get("image_id").(int)) - reqX86.BootDisk = uint64(d.Get("boot_disk_size").(int)) - reqX86.Start = false + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } - argVal, argSet := d.GetOk("description") - if argSet { - reqPPC.Description = argVal.(string) - reqX86.Description = argVal.(string) + argVal, ok := d.GetOk("description") + if ok { + createReqPPC.Description = argVal.(string) + createReqX86.Description = argVal.(string) } if sepID, ok := d.GetOk("sep_id"); ok { - reqPPC.SEPID = uint64(sepID.(int)) - reqX86.SEPID = uint64(sepID.(int)) + createReqPPC.SEPID = uint64(sepID.(int)) + createReqX86.SEPID = uint64(sepID.(int)) } if pool, ok := d.GetOk("pool"); ok { - reqPPC.Pool = pool.(string) - reqX86.Pool = pool.(string) + createReqPPC.Pool = pool.(string) + createReqX86.Pool = pool.(string) + } 
+ + if ipaType, ok := d.GetOk("ipa_type"); ok { + createReqPPC.IPAType = ipaType.(string) + createReqX86.IPAType = ipaType.(string) } - argVal, argSet = d.GetOk("cloud_init") - if argSet { + if bootSize, ok := d.GetOk("boot_disk_size"); ok { + createReqPPC.BootDisk = uint64(bootSize.(int)) + createReqX86.BootDisk = uint64(bootSize.(int)) + } + + if IS, ok := d.GetOk("is"); ok { + createReqPPC.IS = IS.(string) + createReqX86.IS = IS.(string) + } + + if !d.Get("with_default_vins").(bool) { + createReqX86.Interfaces = make([]kvmx86.Interface, 0) + } + + if networks, ok := d.GetOk("network"); ok { + if networks.(*schema.Set).Len() > 0 { + ns := networks.(*schema.Set).List() + + interfaces := make([]kvmx86.Interface, 0) + for _, elem := range ns { + netInterfaceVal := elem.(map[string]interface{}) + reqInterface := kvmx86.Interface{ + NetType: netInterfaceVal["net_type"].(string), + NetID: uint64(netInterfaceVal["net_id"].(int)), + } + + ipaddr, ipSet := netInterfaceVal["ip_address"] + if ipSet { + reqInterface.IPAddr = ipaddr.(string) + } + + interfaces = append(interfaces, reqInterface) + } + + createReqX86.Interfaces = interfaces + } + } + + argVal, ok = d.GetOk("cloud_init") + if ok { userdata := argVal.(string) if userdata != "" && userdata != "applied" { - reqPPC.Userdata = userdata - reqX86.Userdata = userdata + createReqPPC.Userdata = strings.TrimSpace(userdata) + createReqX86.Userdata = strings.TrimSpace(userdata) } } - var createdID uint64 + var computeId uint64 driver := d.Get("driver").(string) if driver == "KVM_PPC" { + createReqPPC.RGID = uint64(d.Get("rg_id").(int)) + createReqPPC.Name = d.Get("name").(string) + createReqPPC.CPU = uint64(d.Get("cpu").(int)) + createReqPPC.RAM = uint64(d.Get("ram").(int)) + createReqPPC.ImageID = uint64(d.Get("image_id").(int)) + log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC") - id, err := c.CloudBroker().KVMPPC().Create(ctx, reqPPC) + apiResp, err := c.CloudBroker().KVMPPC().Create(ctx, 
createReqPPC) if err != nil { return diag.FromErr(err) } - createdID = id + d.SetId(strconv.FormatUint(apiResp, 10)) + computeId = apiResp } else { + createReqX86.RGID = uint64(d.Get("rg_id").(int)) + createReqX86.Name = d.Get("name").(string) + createReqX86.CPU = uint64(d.Get("cpu").(int)) + createReqX86.RAM = uint64(d.Get("ram").(int)) + createReqX86.ImageID = uint64(d.Get("image_id").(int)) + + // createReqX86.Driver = driver + + if custom_fields, ok := d.GetOk("custom_fields"); ok { + val := custom_fields.(string) + val = strings.ReplaceAll(val, "\\", "") + val = strings.ReplaceAll(val, "\n", "") + val = strings.ReplaceAll(val, "\t", "") + val = strings.TrimSpace(val) + + createReqX86.CustomField = val + } + log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86") - id, err := c.CloudBroker().KVMX86().Create(ctx, reqX86) + apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86) if err != nil { return diag.FromErr(err) } - createdID = id + d.SetId(strconv.FormatUint(apiResp, 10)) + computeId = apiResp } - d.SetId(strconv.FormatUint(createdID, 10)) + warnings := dc.Warnings{} cleanup := false defer func() { if cleanup { req := compute.DeleteRequest{ - ComputeID: createdID, + ComputeID: computeId, Permanently: true, DetachDisks: true, } @@ -147,61 +195,322 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf } }() - log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", createdID, d.Get("name").(string)) + log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", computeId, d.Get("name").(string)) - argVal, argSet = d.GetOk("extra_disks") - if argSet && argVal.(*schema.Set).Len() > 0 { + argVal, ok = d.GetOk("extra_disks") + if ok && argVal.(*schema.Set).Len() > 0 { log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len()) - err := utilityComputeExtraDisksConfigure(ctx, d, m, false) // 
do_delta=false, as we are working on a new compute + err := utilityComputeExtraDisksConfigure(ctx, d, m, false) if err != nil { - log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", createdID, err) + log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", computeId, err) cleanup = true return diag.FromErr(err) } } - // Configure external networks if any - argVal, argSet = d.GetOk("network") - if argSet && argVal.(*schema.Set).Len() > 0 { - log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len()) - err := utilityComputeNetworksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute - if err != nil { - log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", createdID, err) - cleanup = true - return diag.FromErr(err) + + if d.Get("started").(bool) { + req := compute.StartRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId) + if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { + warnings.Add(err) } } - if d.Get("started").(bool) { - req := compute.StartRequest{ - ComputeID: createdID, + if enabled, ok := d.GetOk("enabled"); ok { + if enabled.(bool) { + req := compute.EnableRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) + if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { + warnings.Add(err) + } + } else { + req := compute.DisableRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) + if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { + warnings.Add(err) + } } + } - 
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", createdID) - if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { - cleanup = true - return diag.FromErr(err) + if !cleanup { + if affinityLabel, ok := d.GetOk("affinity_label"); ok { + req := compute.AffinityLabelSetRequest{ + ComputeIDs: []uint64{ + computeId, + }, + AffinityLabel: affinityLabel.(string), + } + + _, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req) + if err != nil { + warnings.Add(err) + } + } + + if disks, ok := d.GetOk("disks"); ok { + log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId) + addedDisks := disks.([]interface{}) + if len(addedDisks) > 0 { + for _, disk := range addedDisks { + diskConv := disk.(map[string]interface{}) + req := compute.DiskAddRequest{ + ComputeID: computeId, + DiskName: diskConv["disk_name"].(string), + Size: uint64(diskConv["size"].(int)), + SepID: uint64(diskConv["sep_id"].(int)), + } + + if diskConv["disk_type"].(string) != "" { + req.DiskType = diskConv["disk_type"].(string) + } + if diskConv["pool"].(string) != "" { + req.Pool = diskConv["pool"].(string) + } + if diskConv["desc"].(string) != "" { + req.Description = diskConv["desc"].(string) + } + if diskConv["image_id"].(int) != 0 { + req.ImageID = uint64(diskConv["image_id"].(int)) + } + + _, err := c.CloudBroker().Compute().DiskAdd(ctx, req) + if err != nil { + cleanup = true + return diag.FromErr(err) + } + } + } + } + + if ars, ok := d.GetOk("affinity_rules"); ok { + log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", computeId) + addedAR := ars.([]interface{}) + if len(addedAR) > 0 { + for _, ar := range addedAR { + arConv := ar.(map[string]interface{}) + req := compute.AffinityRuleAddRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: 
arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + } + + if ars, ok := d.GetOk("anti_affinity_rules"); ok { + log.Debugf("resourceComputeCreate: Create anti affinity rules on ComputeID: %d", computeId) + addedAR := ars.([]interface{}) + if len(addedAR) > 0 { + for _, ar := range addedAR { + arConv := ar.(map[string]interface{}) + req := compute.AntiAffinityRuleAddRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AntiAffinityRuleAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + } + } + + if tags, ok := d.GetOk("tags"); ok { + log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId) + addedTags := tags.(*schema.Set).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagAddRequest{ + ComputeIDs: []uint64{computeId}, + Key: tagItem["key"].(string), + Value: tagItem["value"].(string), + } + + _, err := c.CloudBroker().Compute().TagAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + } } } - log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", createdID, d.Get("name").(string)) + if pfws, ok := d.GetOk("port_forwarding"); ok { + log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId) + addedPfws := pfws.(*schema.Set).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWAddRequest{ + ComputeID: computeId, + PublicPortStart: uint64(pfwItem["public_port_start"].(int)), + PublicPortEnd: int64(pfwItem["public_port_end"].(int)), + LocalBasePort: 
uint64(pfwItem["local_port"].(int)), + Proto: pfwItem["proto"].(string), + } + + _, err := c.CloudBroker().Compute().PFWAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + } + + if userAcess, ok := d.GetOk("user_access"); ok { + log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId) + usersAcess := userAcess.(*schema.Set).List() + if len(usersAcess) > 0 { + for _, userAcessInterface := range usersAcess { + userAccessItem := userAcessInterface.(map[string]interface{}) + req := compute.UserGrantRequest{ + ComputeID: computeId, + Username: userAccessItem["username"].(string), + AccessType: userAccessItem["access_type"].(string), + } + + _, err := c.CloudBroker().Compute().UserGrant(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + } + + if snapshotList, ok := d.GetOk("snapshot"); ok { + log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId) + snapshots := snapshotList.(*schema.Set).List() + if len(snapshots) > 0 { + for _, snapshotInterface := range snapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotCreateRequest{ + ComputeID: computeId, + Label: snapshotItem["label"].(string), + } + + _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + } + + if cdtList, ok := d.GetOk("cd"); ok { + log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId) + cds := cdtList.(*schema.Set).List() + if len(cds) > 0 { + snapshotItem := cds[0].(map[string]interface{}) + req := compute.CDInsertRequest{ + ComputeID: computeId, + CDROMID: uint64(snapshotItem["cdrom_id"].(int)), + } + + _, err := c.CloudBroker().Compute().CDInsert(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } + + if d.Get("pin_to_stack").(bool) { + req := compute.PinToStackRequest{ + ComputeID: computeId, + } + _, err := c.CloudBroker().Compute().PinToStack(ctx, req) + if err != nil { + warnings.Add(err) + } + } 
+ + if d.Get("pause").(bool) { + req := compute.PauseRequest{ + ComputeID: computeId, + } + _, err := c.CloudBroker().Compute().Pause(ctx, req) + if err != nil { + warnings.Add(err) + } + } + + log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string)) - return dataSourceComputeRead(ctx, d, m) + // We may reuse dataSourceComputeRead here as we maintain similarity + // between Compute resource and Compute data source schemas + // Compute read function will also update resource ID on success, so that Terraform + // will know the resource exists + diags := resourceComputeRead(ctx, d, m) + + return append(diags, warnings.Get()...) } func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceComputeRead: called for Compute name %s, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int)) - compFacts, err := utilityComputeCheckPresence(ctx, d, m) - if compFacts == nil { + c := m.(*controller.ControllerCfg) + + computeRec, err := utilityComputeCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + hasChanged := false + + switch computeRec.Status { + case status.Deleted: + restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} + enableReq := compute.EnableRequest{ComputeID: computeRec.ID} + + _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + _, err = c.CloudBroker().Compute().Enable(ctx, enableReq) if err != nil { return diag.FromErr(err) } - return nil + hasChanged = true + case status.Destroyed: + d.SetId("") + return resourceComputeCreate(ctx, d, m) + case status.Disabled: + log.Debugf("The compute is in status: %s, troubles may occur with update. 
Please, enable compute first.", computeRec.Status) + case status.Redeploying: + case status.Deleting: + case status.Destroying: + return diag.Errorf("The compute is in progress with status: %s", computeRec.Status) + case status.Modeled: + return diag.Errorf("The compute is in status: %s, please, contact support for more information", computeRec.Status) + } + + if hasChanged { + computeRec, err = utilityComputeCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } } - if err = flattenCompute(d, compFacts); err != nil { + d.SetId(strconv.FormatUint(computeRec.ID, 10)) + + if err = flattenCompute(d, computeRec); err != nil { return diag.FromErr(err) } @@ -216,22 +525,74 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf d.Id(), d.Get("name").(string), d.Get("rg_id").(int)) c := m.(*controller.ControllerCfg) - computeID, _ := strconv.ParseUint(d.Id(), 10, 64) - /* - 1. Resize CPU/RAM - 2. Resize (grow) boot disk - 3. Update extra disks - 4. Update networks - 5. 
Start/stop - */ + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } + + computeRec, err := utilityComputeCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if d.HasChange("enabled") { + enabled := d.Get("enabled").(bool) + if enabled { + req := compute.EnableRequest{ + ComputeID: computeRec.ID, + } + + if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { + return diag.FromErr(err) + } + } else { + req := compute.DisableRequest{ + ComputeID: computeRec.ID, + } + + if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { + return diag.FromErr(err) + } + } + log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled) + } + + // check compute statuses + switch computeRec.Status { + case status.Deleted: + restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} + enableReq := compute.EnableRequest{ComputeID: computeRec.ID} + + _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + _, err = c.CloudBroker().Compute().Enable(ctx, enableReq) + if err != nil { + return diag.FromErr(err) + } + + case status.Destroyed: + d.SetId("") + return resourceComputeCreate(ctx, d, m) + case status.Disabled: + log.Debugf("The compute is in status: %s, troubles may occur with update. Please, enable compute first.", computeRec.Status) + case status.Redeploying: + case status.Deleting: + case status.Destroying: + return diag.Errorf("The compute is in progress with status: %s", computeRec.Status) + case status.Modeled: + return diag.Errorf("The compute is in status: %s, please, contact support for more information", computeRec.Status) + } - // 1. 
Resize CPU/RAM + doUpdate := false resizeReq := compute.ResizeRequest{ - ComputeID: computeID, + ComputeID: computeRec.ID, Force: true, } - doUpdate := false + + warnings := dc.Warnings{} oldCpu, newCpu := d.GetChange("cpu") if oldCpu.(int) != newCpu.(int) { @@ -253,55 +614,195 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d", oldCpu.(int), newCpu.(int), oldRam.(int), newRam.(int)) - _, err := c.CloudBroker().Compute().Resize(ctx, resizeReq) if err != nil { return diag.FromErr(err) } } - // 2. Resize (grow) Boot disk oldSize, newSize := d.GetChange("boot_disk_size") if oldSize.(int) < newSize.(int) { - req := disks.ResizeRequest{ - DiskID: uint64(d.Get("boot_disk_id").(int)), - Size: uint64(newSize.(int)), + req := compute.DiskResizeRequest{ComputeID: computeRec.ID} + if diskId, ok := d.GetOk("boot_disk_id"); ok { + req.DiskID = uint64(diskId.(int)) + + } else { + bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + req.DiskID = bootDisk.ID } + req.Size = uint64(newSize.(int)) log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d", d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int)) - _, err := c.CloudBroker().Disks().Resize(ctx, req) + _, err := c.CloudBroker().Compute().DiskResize(ctx, req) if err != nil { return diag.FromErr(err) } + } else if oldSize.(int) > newSize.(int) { log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id()) } - // 3. 
Calculate and apply changes to data disks - err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any - if err != nil { - return diag.FromErr(err) + if d.HasChange("extra_disks") { + err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any + if err != nil { + return diag.FromErr(err) + } } - // 4. Calculate and apply changes to network connections - err = utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any - if err != nil { - return diag.FromErr(err) + if d.HasChange("network") { + err = utilityComputeNetworksConfigure(ctx, d, m, true, false, computeRec.ID) // pass do_delta = true to apply changes, if any + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("description") || d.HasChange("name") { + req := compute.UpdateRequest{ + ComputeID: computeRec.ID, + Name: d.Get("name").(string), + } + + if desc, ok := d.GetOk("desc"); ok { + req.Description = desc.(string) + } + + if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("disks") { + deletedDisks := make([]interface{}, 0) + addedDisks := make([]interface{}, 0) + updatedDisks := make([]interface{}, 0) + + oldDisks, newDisks := d.GetChange("disks") + oldConv := oldDisks.([]interface{}) + newConv := newDisks.([]interface{}) + + for _, el := range oldConv { + if !isContainsDisk(newConv, el) { + deletedDisks = append(deletedDisks, el) + } + } + + for _, el := range newConv { + if !isContainsDisk(oldConv, el) { + addedDisks = append(addedDisks, el) + } else { + if isChangeDisk(oldConv, el) { + updatedDisks = append(updatedDisks, el) + } + } + } + + if len(deletedDisks) > 0 { + stopReq := compute.StopRequest{ + ComputeID: computeRec.ID, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) + if err != nil { + return diag.FromErr(err) + } + + for _, disk := range 
deletedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + + req := compute.DiskDelRequest{ + ComputeID: computeRec.ID, + DiskID: uint64(diskConv["disk_id"].(int)), + Permanently: diskConv["permanently"].(bool), + } + + _, err := c.CloudBroker().Compute().DiskDel(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + req := compute.StartRequest{ + ComputeID: computeRec.ID, + AltBootID: 0, + } + _, err = c.CloudBroker().Compute().Start(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + if len(addedDisks) > 0 { + for _, disk := range addedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + req := compute.DiskAddRequest{ + ComputeID: computeRec.ID, + DiskName: diskConv["disk_name"].(string), + Size: uint64(diskConv["size"].(int)), + SepID: uint64(diskConv["sep_id"].(int)), + } + + if diskConv["disk_type"].(string) != "" { + req.DiskType = diskConv["disk_type"].(string) + } + if diskConv["pool"].(string) != "" { + req.Pool = diskConv["pool"].(string) + } + if diskConv["desc"].(string) != "" { + req.Description = diskConv["desc"].(string) + } + if diskConv["image_id"].(int) != 0 { + req.ImageID = uint64(diskConv["image_id"].(int)) + } + _, err := c.CloudBroker().Compute().DiskAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if len(updatedDisks) > 0 { + for _, disk := range updatedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + req := compute.DiskResizeRequest{ + ComputeID: computeRec.ID, + DiskID: uint64(diskConv["disk_id"].(int)), + Size: uint64(diskConv["size"].(int)), + } + + _, err := c.CloudBroker().Compute().DiskResize(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } } if d.HasChange("started") { if d.Get("started").(bool) { req := compute.StartRequest{ - ComputeID: computeID, + ComputeID: computeRec.ID, } if 
_, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { return diag.FromErr(err) } } else { req := compute.StopRequest{ - ComputeID: computeID, + ComputeID: computeRec.ID, } if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil { return diag.FromErr(err) @@ -309,204 +810,1322 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } } - // we may reuse dataSourceComputeRead here as we maintain similarity - // between Compute resource and Compute data source schemas - return dataSourceComputeRead(ctx, d, m) -} - -func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d", - d.Get("name").(string), d.Get("rg_id").(int)) - - c := m.(*controller.ControllerCfg) - computeID, _ := strconv.ParseUint(d.Id(), 10, 64) + if d.HasChange("affinity_label") { + affinityLabel := d.Get("affinity_label").(string) + if affinityLabel == "" { + req := compute.AffinityLabelRemoveRequest{ + ComputeIDs: []uint64{computeRec.ID}, + } - req := compute.DeleteRequest{ - ComputeID: computeID, - Permanently: d.Get("permanently").(bool), - DetachDisks: d.Get("detach_disks").(bool), - } + _, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + req := compute.AffinityLabelSetRequest{ + ComputeIDs: []uint64{computeRec.ID}, + AffinityLabel: affinityLabel, + } - if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil { - return diag.FromErr(err) + _, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req) + if err != nil { + return diag.FromErr(err) + } } - return nil -} + if d.HasChange("affinity_rules") { + deletedAR := make([]interface{}, 0) + addedAR := make([]interface{}, 0) -func ResourceCompute() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, + oldAR, newAR := d.GetChange("affinity_rules") + oldConv := oldAR.([]interface{}) + 
newConv := newAR.([]interface{}) - CreateContext: resourceComputeCreate, - ReadContext: resourceComputeRead, - UpdateContext: resourceComputeUpdate, - DeleteContext: resourceComputeDelete, + if len(newConv) == 0 { + req := compute.AffinityRulesClearRequest{ + ComputeIDs: []uint64{computeRec.ID}, + } - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, + _, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + for _, el := range oldConv { + if !isContainsAR(newConv, el) { + deletedAR = append(deletedAR, el) + } + } + for _, el := range newConv { + if !isContainsAR(oldConv, el) { + addedAR = append(addedAR, el) + } + } - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout180s, - Read: &constants.Timeout30s, - Update: &constants.Timeout180s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this compute. 
Compute names are case sensitive and must be unique in the resource group.", - }, + if len(deletedAR) > 0 { + for _, ar := range deletedAR { + arConv := ar.(map[string]interface{}) + req := compute.AffinityRuleRemoveRequest{ + ComputeIDs: []uint64{computeRec.ID}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + if len(addedAR) > 0 { + for _, ar := range addedAR { + arConv := ar.(map[string]interface{}) + req := compute.AffinityRuleAddRequest{ + ComputeIDs: []uint64{computeRec.ID}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + } + } - "rg_id": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "ID of the resource group where this compute should be deployed.", - }, + if d.HasChange("tags") { + oldSet, newSet := d.GetChange("tags") + deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedTags) > 0 { + for _, tagInterface := range deletedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagRemoveRequest{ + ComputeIDs: []uint64{computeRec.ID}, + Key: tagItem["key"].(string), + } + + _, err := c.CloudBroker().Compute().TagRemove(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } - "driver": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating - Description: "Hardware 
architecture of this compute instance.", - }, + addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagAddRequest{ + ComputeIDs: []uint64{computeRec.ID}, + Key: tagItem["key"].(string), + Value: tagItem["value"].(string), + } + + _, err := c.CloudBroker().Compute().TagAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + } - "cpu": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), - Description: "Number of CPUs to allocate to this compute instance.", - }, + if d.HasChange("port_forwarding") { + oldSet, newSet := d.GetChange("port_forwarding") + deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedPfws) > 0 { + for _, pfwInterface := range deletedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWDelRequest{ + ComputeID: computeRec.ID, + PublicPortStart: uint64(pfwItem["public_port_start"].(int)), + LocalBasePort: uint64(pfwItem["local_port"].(int)), + Proto: pfwItem["proto"].(string), + } + + if pfwItem["public_port_end"].(int) == -1 { + req.PublicPortEnd = req.PublicPortStart + } else { + req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int)) + } + + _, err := c.CloudBroker().Compute().PFWDel(ctx, req) + if err != nil { + warnings.Add(err) + } + } + } - "ram": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), - Description: "Amount of RAM in MB to allocate to this compute instance.", - }, + addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWAddRequest{ + ComputeID: computeRec.ID, + PublicPortStart: 
uint64(pfwItem["public_port_start"].(int)), + PublicPortEnd: int64(pfwItem["public_port_end"].(int)), + LocalBasePort: uint64(pfwItem["local_port"].(int)), + Proto: pfwItem["proto"].(string), + } + + _, err := c.CloudBroker().Compute().PFWAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + } - "image_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "ID of the OS image to base this compute instance on.", - }, + if d.HasChange("user_access") { + oldSet, newSet := d.GetChange("user_access") + deletedUserAcess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedUserAcess) > 0 { + for _, userAcessInterface := range deletedUserAcess { + userAccessItem := userAcessInterface.(map[string]interface{}) + req := compute.UserRevokeRequest{ + ComputeID: computeRec.ID, + Username: userAccessItem["username"].(string), + } + + _, err := c.CloudBroker().Compute().UserRevoke(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } - "boot_disk_size": { - Type: schema.TypeInt, - Required: true, - Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.", - }, + addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedUserAccess) > 0 { + for _, userAccessInterface := range addedUserAccess { + userAccessItem := userAccessInterface.(map[string]interface{}) + req := compute.UserGrantRequest{ + ComputeID: computeRec.ID, + Username: userAccessItem["username"].(string), + AccessType: userAccessItem["access_type"].(string), + } + + _, err := c.CloudBroker().Compute().UserGrant(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + } - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - Description: "ID of SEP to create bootDisk on. 
Uses image's sepId if not set.", - }, + if d.HasChange("snapshot") { + oldSet, newSet := d.GetChange("snapshot") + deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedSnapshots) > 0 { + for _, snapshotInterface := range deletedSnapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotDeleteRequest{ + ComputeID: computeRec.ID, + Label: snapshotItem["label"].(string), + } + + _, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } - "pool": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.", - }, + addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedSnapshots) > 0 { + for _, snapshotInterface := range addedSnapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotCreateRequest{ + ComputeID: computeRec.ID, + Label: snapshotItem["label"].(string), + } + + _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + } + + if d.HasChange("rollback") { + if rollback, ok := d.GetOk("rollback"); ok { + req := compute.StopRequest{ + ComputeID: computeRec.ID, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + rollbackInterface := rollback.(*schema.Set).List()[0] + rollbackItem := rollbackInterface.(map[string]interface{}) + + rollbackReq := compute.SnapshotRollbackRequest{ + ComputeID: computeRec.ID, + Label: rollbackItem["label"].(string), + } + + _, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("cd") { + oldSet, newSet := d.GetChange("cd") + deletedCd := 
(oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedCd) > 0 { + req := compute.CDEjectRequest{ + ComputeID: computeRec.ID, + } + + _, err := c.CloudBroker().Compute().CDEject(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedCd) > 0 { + cdItem := addedCd[0].(map[string]interface{}) + req := compute.CDInsertRequest{ + ComputeID: computeRec.ID, + CDROMID: uint64(cdItem["cdrom_id"].(int)), + } + + _, err := c.CloudBroker().Compute().CDInsert(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("pin_to_stack") { + oldPin, newPin := d.GetChange("pin_to_stack") + if oldPin.(bool) == true && newPin.(bool) == false { + req := compute.UnpinFromStackRequest{ + ComputeID: computeRec.ID, + } + + _, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + if oldPin.(bool) == false && newPin.(bool) == true { + req := compute.PinToStackRequest{ + ComputeID: computeRec.ID, + } + + _, err := c.CloudBroker().Compute().PinToStack(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("pause") { + oldPause, newPause := d.GetChange("pause") + if oldPause.(bool) == true && newPause.(bool) == false { + req := compute.ResumeRequest{ + ComputeID: computeRec.ID, + } + _, err := c.CloudBroker().Compute().Resume(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + if oldPause.(bool) == false && newPause.(bool) == true { + req := compute.PauseRequest{ + ComputeID: computeRec.ID, + } + + _, err := c.CloudBroker().Compute().Pause(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("reset") { + oldReset, newReset := d.GetChange("reset") + if oldReset.(bool) == false && newReset.(bool) == true { + req := compute.ResetRequest{ + ComputeID: computeRec.ID, + } + _, err := c.CloudBroker().Compute().Reset(ctx, 
req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("image_id") { + oldImage, newImage := d.GetChange("image_id") + stopReq := compute.StopRequest{ + ComputeID: computeRec.ID, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) + if err != nil { + return diag.FromErr(err) + } + + if oldImage.(int) != newImage.(int) { + req := compute.RedeployRequest{ + ComputeID: computeRec.ID, + ImageID: uint64(newImage.(int)), + } + + if diskSize, ok := d.GetOk("boot_disk_size"); ok { + req.DiskSize = uint64(diskSize.(int)) + } + if dataDisks, ok := d.GetOk("data_disks"); ok { + req.DataDisks = dataDisks.(string) + } + if autoStart, ok := d.GetOk("auto_start"); ok { + req.AutoStart = autoStart.(bool) + } + if forceStop, ok := d.GetOk("force_stop"); ok { + req.ForceStop = forceStop.(bool) + } + + _, err := c.CloudBroker().Compute().Redeploy(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + // if d.HasChange("custom_fields") { + // val := d.Get("custom_fields").(string) + // val = strings.ReplaceAll(val, "\\", "") + // val = strings.ReplaceAll(val, "\n", "") + // val = strings.ReplaceAll(val, "\t", "") + // val = strings.TrimSpace(val) + + // if len(val) > 0 { + // req := compute.SetCustomFieldsRequest{ + // ComputeID: computeRec.ID, + // CustomFields: val, + // } + + // _, err := c.CloudBroker().Compute().SetCustomFields(ctx, req) + // if err != nil { + // return diag.FromErr(err) + // } + // } else { + // req := compute.DeleteCustomFieldsRequest{ + // ComputeID: computeRec.ID, + // } + + // _, err := c.CloudBroker().Compute().DeleteCustomFields(ctx, req) + // if err != nil { + // return diag.FromErr(err) + // } + // } + // } todo: uncomment when sdk updates + + diags := resourceComputeRead(ctx, d, m) + + return append(diags, warnings.Get()...) 
+} + +func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + // NOTE: this function destroys target Compute instance "permanently", so + // there is no way to restore it. + // If compute being destroyed has some extra disks attached, they are + // detached from the compute + log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d", + d.Get("name").(string), d.Get("rg_id").(int)) + + c := m.(*controller.ControllerCfg) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + req := compute.DeleteRequest{ + ComputeID: computeId, + Permanently: d.Get("permanently").(bool), + DetachDisks: d.Get("detach_disks").(bool), + } + + if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func ResourceCompute() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceComputeCreate, + ReadContext: resourceComputeRead, + UpdateContext: resourceComputeUpdate, + DeleteContext: resourceComputeDelete, - "extra_disks": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxExtraDisksPerCompute, - Elem: &schema.Schema{ - Type: schema.TypeInt, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: resourceComputeSchemaMake(), + } +} + +func resourceComputeSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of this compute. 
Compute names are case sensitive and must be unique in the resource group.", + }, + "rg_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + Description: "ID of the resource group where this compute should be deployed.", + }, + "driver": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: statefuncs.StateFuncToUpper, + ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating + Description: "Hardware architecture of this compute instance.", + }, + "cpu": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), + Description: "Number of CPUs to allocate to this compute instance.", + }, + "ram": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), + Description: "Amount of RAM in MB to allocate to this compute instance.", + }, + "image_id": { + Type: schema.TypeInt, + Required: true, + //ForceNew: true, //REDEPLOY + Description: "ID of the OS image to base this compute instance on.", + }, + "boot_disk_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.", + }, + "sep_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.", + }, + "pool": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.", + }, + "cloud_init": { + Type: schema.TypeString, + Optional: true, + Description: "Optional cloud_init parameters. 
Applied when creating new compute instance only, ignored in all other cases.", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Optional text description of this compute instance.", + }, + "started": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Is compute started.", + }, + "is": { + Type: schema.TypeString, + Optional: true, + Description: "system name", + }, + "ipa_type": { + Type: schema.TypeString, + Optional: true, + Description: "compute purpose", + }, + "custom_fields": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "network": { + Type: schema.TypeSet, + Optional: true, + MinItems: 1, + MaxItems: constants.MaxNetworksPerCompute, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "net_type": { + Type: schema.TypeString, + Required: true, + StateFunc: statefuncs.StateFuncToUpper, + ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, false), // observe case while validating + Description: "Type of the network for this connection, either EXTNET or VINS.", + }, + + "net_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the network for this connection.", + }, + + "ip_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: networkSubresIPAddreDiffSupperss, + Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.", + }, + + "mac": { + Type: schema.TypeString, + Computed: true, + Description: "MAC address associated with this connection. MAC address is assigned automatically.", + }, }, - Description: "Optional list of IDs of extra disks to attach to this compute. 
You may specify several extra disks.", }, - - "network": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxNetworksPerCompute, - Elem: &schema.Resource{ - Schema: networkSubresourceSchemaMake(), + Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.", + }, + "affinity_label": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Set affinity label for compute", + }, + "affinity_rules": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topology": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false), + Description: "compute or node, for whom rule applies", + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false), + Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule", + }, + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false), + Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'", + }, + "key": { + Type: schema.TypeString, + Required: true, + Description: "key that are taken into account when analyzing this rule will be identified", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "value that must match the key to be taken into account when analyzing this rule", + }, }, - Description: "Optional network connection(s) for this compute. 
You may specify several network blocks, one for each connection.", }, - - /* - "ssh_keys": { - Type: schema.TypeList, - Optional: true, - MaxItems: MaxSshKeysPerCompute, - Elem: &schema.Resource{ - Schema: sshSubresourceSchemaMake(), + }, + "anti_affinity_rules": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topology": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false), + Description: "compute or node, for whom rule applies", + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false), + Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule", + }, + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false), + Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'", + }, + "key": { + Type: schema.TypeString, + Required: true, + Description: "key that are taken into account when analyzing this rule will be identified", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "value that must match the key to be taken into account when analyzing this rule", }, - Description: "SSH keys to authorize on this compute instance.", }, - */ - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Optional text description of this compute instance.", }, - - "cloud_init": { - Type: schema.TypeString, - Optional: true, - Default: "applied", - DiffSuppressFunc: cloudInitDiffSupperss, - Description: "Optional cloud_init parameters. 
Applied when creating new compute instance only, ignored in all other cases.", + }, + "disks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_name": { + Type: schema.TypeString, + Required: true, + Description: "Name for disk", + }, + "size": { + Type: schema.TypeInt, + Required: true, + Description: "Disk size in GiB", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Storage endpoint provider ID; by default the same with boot disk", + }, + "disk_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Pool name; by default will be chosen automatically", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional description", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Specify image id for create disk from template", + }, + "permanently": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Disk deletion status", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID", + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "size_used": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, - - // The rest are Compute properties, which are "computed" once it is created - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", + }, + "with_default_vins": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Create compute with default resgroup ViNS 
(true) or without any interfaces (false). This parameter is ignored if network block is specified", + }, + "boot_disk": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_name": { + Type: schema.TypeString, + Required: true, + Description: "Name for disk", + }, + "size": { + Type: schema.TypeInt, + Required: true, + Description: "Disk size in GiB", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Storage endpoint provider ID; by default the same with boot disk", + }, + "disk_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Pool name; by default will be chosen automatically", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional description", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Specify image id for create disk from template", + }, + "permanently": { + Type: schema.TypeBool, + Optional: true, + Description: "Disk deletion status", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID", + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "size_used": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, - - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", + }, + "extra_disks": { + Type: schema.TypeSet, + Optional: true, + MaxItems: constants.MaxExtraDisksPerCompute, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs 
to.", + Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, }, - - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", + }, + "port_forwarding": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_port_start": { + Type: schema.TypeInt, + Required: true, + }, + "public_port_end": { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + "local_port": { + Type: schema.TypeInt, + Required: true, + }, + "proto": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false), + }, + }, }, - - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: osUsersSubresourceSchemaMake(), + }, + "user_access": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + }, + "access_type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "snapshot": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "rollback": { + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cd": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cdrom_id": { + Type: schema.TypeInt, + Required: 
true, + }, + }, + }, + }, + "pin_to_stack": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "If true - enable compute, else - disable", + }, + "pause": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "reset": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "auto_start": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag for redeploy compute", + }, + "force_stop": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag for redeploy compute", + }, + "data_disks": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"KEEP", "DETACH", "DESTROY"}, false), + Default: "DETACH", + Description: "Flag for redeploy compute", + }, + "detach_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "permanently": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + // Computed properties + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the account this compute instance belongs to.", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the account this compute instance belongs to.", + }, + "affinity_weight": { + Type: schema.TypeInt, + Computed: true, + }, + "arch": { + Type: schema.TypeString, + Computed: true, + }, + "boot_order": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "boot_disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "This compute instance boot disk ID.", + }, + "clone_reference": { + Type: schema.TypeInt, + Computed: true, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "computeci_id": { + Type: schema.TypeInt, + Computed: true, + }, + 
"created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "devices": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conn_id": { + Type: schema.TypeInt, + Computed: true, + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + }, + "def_gw": { + Type: schema.TypeString, + Computed: true, + }, + "flip_group_id": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "listen_ssh": { + Type: schema.TypeBool, + Computed: true, + }, + "mac": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "netmask": { + Type: schema.TypeInt, + Computed: true, + }, + "net_type": { + Type: schema.TypeString, + Computed: true, + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + }, + "qos": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "e_rate": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "in_brust": { + Type: schema.TypeInt, + Computed: true, + }, + "in_rate": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "target": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "vnfs": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, }, - Description: "Guest OS users provisioned on this compute instance.", }, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "manager_id": { + Type: schema.TypeInt, + Computed: true, + }, + "manager_type": { + Type: schema.TypeString, + Computed: true, + }, + "migrationjob": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "os_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "GUID of this guest OS user.", + }, + + "login": { + Type: schema.TypeString, + Computed: true, + Description: "Login name of this guest OS user.", + }, - "started": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Is compute started.", + "password": { + Type: schema.TypeString, + Computed: true, + //Sensitive: true, + Description: "Password of this guest OS user.", + }, + + "public_key": { + Type: schema.TypeString, + Computed: true, + Description: "SSH public key of this guest OS user.", + }, + }, + }, + Description: "Guest OS users provisioned on this compute instance.", + }, + "pinned": { + Type: schema.TypeBool, + Computed: true, + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource group where this compute instance is located.", + }, + "snap_sets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "guid": { + Type: 
schema.TypeString, + Computed: true, + }, + "label": { + Type: schema.TypeString, + Computed: true, + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + }, + }, }, }, + "stack_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the stack, on which VM started", + }, + "stack_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the stack, on which VM started", + }, + "stateless_sep_id": { + Type: schema.TypeInt, + Computed: true, + }, + "stateless_sep_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "vgpus": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "virtual_image_id": { + Type: schema.TypeInt, + Computed: true, + }, } } diff --git a/internal/service/cloudbroker/kvmvm/utility_compute.go b/internal/service/cloudbroker/kvmvm/utility_compute.go index 9e34c28..a723877 100644 --- a/internal/service/cloudbroker/kvmvm/utility_compute.go +++ b/internal/service/cloudbroker/kvmvm/utility_compute.go @@ -33,12 +33,10 @@ package kvmvm import ( "context" - "fmt" "strconv" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,7 +44,6 @@ import ( func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error { c := m.(*controller.ControllerCfg) - computeID, _ := strconv.ParseUint(d.Id(), 10, 64) 
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %t", d.Id(), do_delta) @@ -61,8 +58,9 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa } for _, disk := range new_set.(*schema.Set).List() { + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) req := compute.DiskAttachRequest{ - ComputeID: computeID, + ComputeID: computeId, DiskID: uint64(disk.(int)), } @@ -84,32 +82,52 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set)) log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id()) - for _, diskId := range detach_set.List() { - req := compute.DiskDetachRequest{ - ComputeID: computeID, - DiskID: uint64(diskId.(int)), + + if detach_set.Len() > 0 { + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + stopReq := compute.StopRequest{ + ComputeID: computeId, + Force: false, + } + _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) + if err != nil { + return err } - _, err := c.CloudBroker().Compute().DiskDetach(ctx, req) + for _, diskId := range detach_set.List() { + req := compute.DiskDetachRequest{ + ComputeID: computeId, + DiskID: uint64(diskId.(int)), + } + _, err := c.CloudBroker().Compute().DiskDetach(ctx, req) + if err != nil { + log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err) + apiErrCount++ + lastSavedError = err + } + } + + req := compute.StartRequest{ + ComputeID: computeId, + AltBootID: 0, + } + _, err = c.CloudBroker().Compute().Start(ctx, req) if err != nil { - // failed to detach disk - there will be partial resource update - log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err) - apiErrCount++ - lastSavedError = err + return err } } attach_set := 
new_set.(*schema.Set).Difference(old_set.(*schema.Set)) log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id()) for _, diskId := range attach_set.List() { + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) req := compute.DiskAttachRequest{ - ComputeID: computeID, + ComputeID: computeId, DiskID: uint64(diskId.(int)), } _, err := c.CloudBroker().Compute().DiskAttach(ctx, req) if err != nil { - // failed to attach disk - there will be partial resource update log.Errorf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err) apiErrCount++ lastSavedError = err @@ -125,12 +143,50 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa return nil } -func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error { +func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*compute.RecordCompute, error) { + c := m.(*controller.ControllerCfg) + req := compute.GetRequest{} + + if d.Id() != "" { + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.ComputeID = computeId + } else { + req.ComputeID = uint64(d.Get("compute_id").(int)) + } + + res, err := c.CloudBroker().Compute().Get(ctx, req) + if err != nil { + return nil, err + } + + return res, nil +} + +func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { + if newVal != "" && newVal != oldVal { + log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) + return false + } + log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) + return true // suppress difference +} + +func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool, skip_zero bool, computeID uint64) error { c := 
m.(*controller.ControllerCfg) - computeID, _ := strconv.ParseUint(d.Id(), 10, 64) old_set, new_set := d.GetChange("network") + req := compute.StopRequest{ + ComputeID: computeID, + Force: true, + } + + log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID) + _, err := c.CloudBroker().Compute().Stop(ctx, req) + if err != nil { + return err + } + apiErrCount := 0 var lastSavedError error @@ -139,10 +195,14 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData return nil } - for _, runner := range new_set.(*schema.Set).List() { + for i, runner := range new_set.(*schema.Set).List() { + if i == 0 && skip_zero { + continue + } net_data := runner.(map[string]interface{}) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) req := compute.NetAttachRequest{ - ComputeID: computeID, + ComputeID: computeId, NetType: net_data["net_type"].(string), NetID: uint64(net_data["net_id"].(int)), } @@ -170,15 +230,15 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id()) for _, runner := range detach_set.List() { net_data := runner.(map[string]interface{}) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) req := compute.NetDetachRequest{ - ComputeID: computeID, + ComputeID: computeId, IPAddr: net_data["ip_address"].(string), MAC: net_data["mac"].(string), } _, err := c.CloudBroker().Compute().NetDetach(ctx, req) if err != nil { - // failed to detach this network - there will be partial resource update log.Errorf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s", net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err) apiErrCount++ @@ -190,10 +250,11 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", 
attach_set.Len(), d.Id()) for _, runner := range attach_set.List() { net_data := runner.(map[string]interface{}) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) req := compute.NetAttachRequest{ - ComputeID: computeID, - NetID: uint64(net_data["net_id"].(int)), + ComputeID: computeId, NetType: net_data["net_type"].(string), + NetID: uint64(net_data["net_id"].(int)), } if net_data["ip_address"].(string) != "" { @@ -202,7 +263,6 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData _, err := c.CloudBroker().Compute().NetAttach(ctx, req) if err != nil { - // failed to attach this network - there will be partial resource update log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s", net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err) apiErrCount++ @@ -210,6 +270,15 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData } } + startReq := compute.StartRequest{ComputeID: computeID} + + log.Debugf("utilityComputeNetworksConfigure: starting compute %d", computeID) + _, err = c.CloudBroker().Compute().Start(ctx, startReq) + if err != nil { + apiErrCount++ + lastSavedError = err + } + if apiErrCount > 0 { log.Errorf("utilityComputeNetworksConfigure: there were %d error(s) when managing networks of Compute ID %s. Last error was: %s", apiErrCount, d.Id(), lastSavedError) @@ -219,73 +288,40 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData return nil } -func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*compute.RecordCompute, error) { - c := m.(*controller.ControllerCfg) - - idSet := false - computeID, err := strconv.ParseUint(d.Id(), 10, 64) - if err != nil || computeID <= 0 { - computeId, argSet := d.GetOk("compute_id") // NB: compute_id is NOT present in computeResource schema! 
- if argSet { - computeID = uint64(computeId.(int)) - idSet = true +func isChangeDisk(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) && + elOldConv["size"].(int) != elConv["size"].(int) { + return true } - } else { - idSet = true } + return false +} - if idSet { - // compute ID is specified, try to get compute instance straight by this ID - log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", computeID) - req := compute.GetRequest{ - ComputeID: computeID, - } - - computeFacts, err := c.CloudBroker().Compute().Get(ctx, req) - if err != nil { - return nil, err +func isContainsDisk(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) { + return true } - return computeFacts, nil } + return false +} - // ID was not set in the schema upon entering this function - work through Compute name - // and RG ID - computeName, argSet := d.GetOk("name") - if !argSet { - return nil, fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified") - } - - rgId, argSet := d.GetOk("rg_id") - if !argSet { - return nil, fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string)) - } - - rgListComputesReq := rg.ListComputesRequest{ - RGID: uint64(rgId.(int)), - } - - computeList, err := c.CloudBroker().RG().ListComputes(ctx, rgListComputesReq) - if err != nil { - return nil, err - } - - log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(computeList.Data)) - for index, item := range computeList.Data { - // need to match Compute by name, skip Computes with the same name in DESTROYED satus - if item.Name == computeName.(string) && 
item.Status != "DESTROYED" { - log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name) - // we found the Compute we need - now get detailed information via compute/get API - req := compute.GetRequest{ - ComputeID: item.ID, - } - - apiResp, err := c.CloudBroker().Compute().Get(ctx, req) - if err != nil { - return nil, err - } - return apiResp, nil +func isContainsAR(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["key"].(string) == elConv["key"].(string) && + elOldConv["value"].(string) == elConv["value"].(string) && + elOldConv["mode"].(string) == elConv["mode"].(string) && + elOldConv["topology"].(string) == elConv["topology"].(string) && + elOldConv["policy"].(string) == elConv["policy"].(string) { + return true } } - - return nil, nil + return false } diff --git a/internal/service/cloudbroker/account/account_audit_ds_subresource.go b/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go similarity index 63% rename from internal/service/cloudbroker/account/account_audit_ds_subresource.go rename to internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go index 89059aa..2248f73 100644 --- a/internal/service/cloudbroker/account/account_audit_ds_subresource.go +++ b/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go @@ -1,59 +1,57 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func dataSourceAccountAuditSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "call": { - Type: schema.TypeString, - Computed: true, - }, - "responsetime": { - Type: schema.TypeFloat, - Computed: true, - }, - "statuscode": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeFloat, - Computed: true, - }, - "user": { - Type: schema.TypeString, - Computed: true, - }, - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, +Nikita Sorokin, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package kvmvm + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" +) + +func utilityComputeBootDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*compute.ItemDisk, error) { + computeRecord, err := utilityComputeCheckPresence(ctx, d, m) + if err != nil { + return nil, err + } + + bootDisk := &compute.ItemDisk{} + for _, disk := range computeRecord.Disks { + if disk.Name == "bootdisk" { + *bootDisk = disk + break + } + } + return bootDisk, nil +} diff --git a/internal/service/cloudbroker/lb/data_source_lb.go b/internal/service/cloudbroker/lb/data_source_lb.go new file mode 100644 index 0000000..5b8adab --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb.go @@ -0,0 +1,70 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + lb, err := utilityLBCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(lb.ID, 10)) + + flattenLB(d, lb) + + return nil +} + +func DataSourceLB() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceLBRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dsLBSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/lb/data_source_lb_list.go b/internal/service/cloudbroker/lb/data_source_lb_list.go new file mode 100644 index 0000000..bd49b94 --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb_list.go @@ -0,0 +1,70 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + lbList, err := utilityLBListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenLBList(lbList)) + d.Set("entry_count", lbList.EntryCount) + + return nil +} + +func DataSourceLBList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceLBListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dsLBListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go new file mode 100644 index 0000000..49ae8a0 --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go @@ -0,0 +1,70 
@@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenLBList(lbList)) + d.Set("entry_count", lbList.EntryCount) + + return nil +} + +func DataSourceLBListDeleted() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceLBListDeletedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dsLBListDeletedSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/lb/flattens.go b/internal/service/cloudbroker/lb/flattens.go new file mode 100644 index 0000000..520cc46 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens.go @@ -0,0 +1,262 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" +) + +func flattenLBFrontendBind(d *schema.ResourceData, b *lb.ItemBinding, lbId int64, frontendName string) { + d.Set("lb_id", lbId) + d.Set("frontend_name", frontendName) + d.Set("name", b.Name) + d.Set("address", b.Address) + d.Set("guid", b.GUID) + d.Set("port", b.Port) +} + +func flattenLBFrontend(d *schema.ResourceData, f *lb.ItemFrontend, lbId int64) { + d.Set("lb_id", lbId) + d.Set("backend_name", f.Backend) + d.Set("name", f.Name) + d.Set("guid", f.GUID) + d.Set("bindings", flattendBindings(f.Bindings)) +} + +func flattenResourceLBBackendServer(d *schema.ResourceData, s *lb.ItemServer, lbId int64, backendName string) { + d.Set("lb_id", lbId) + d.Set("backend_name", backendName) + d.Set("name", s.Name) + d.Set("port", s.Port) + d.Set("address", s.Address) + d.Set("check", s.Check) + d.Set("guid", s.GUID) + d.Set("downinter", s.ServerSettings.DownInter) + d.Set("fall", s.ServerSettings.Fall) + d.Set("inter", s.ServerSettings.Inter) + d.Set("maxconn", s.ServerSettings.MaxConn) + d.Set("maxqueue", s.ServerSettings.MaxQueue) + d.Set("rise", s.ServerSettings.Rise) + d.Set("slowstart", s.ServerSettings.SlowStart) + d.Set("weight", s.ServerSettings.Weight) + +} + +func flattenResourceLBBackend(d *schema.ResourceData, b *lb.ItemBackend, lbId int64) { + d.Set("lb_id", lbId) + d.Set("name", b.Name) + 
d.Set("algorithm", b.Algorithm) + d.Set("guid", b.GUID) + d.Set("downinter", b.ServerDefaultSettings.DownInter) + d.Set("fall", b.ServerDefaultSettings.Fall) + d.Set("inter", b.ServerDefaultSettings.Inter) + d.Set("maxconn", b.ServerDefaultSettings.MaxConn) + d.Set("maxqueue", b.ServerDefaultSettings.MaxQueue) + d.Set("rise", b.ServerDefaultSettings.Rise) + d.Set("slowstart", b.ServerDefaultSettings.SlowStart) + d.Set("weight", b.ServerDefaultSettings.Weight) + d.Set("servers", flattenServers(b.Servers)) +} + +func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) { + d.Set("ha_mode", lb.HAMode) + d.Set("ckey", lb.CKey) + d.Set("meta", flattens.FlattenMeta(lb.Meta)) + d.Set("backends", flattenLBBackends(lb.Backends)) + d.Set("desc", lb.Description) + d.Set("dp_api_user", lb.DPAPIUser) + d.Set("dp_api_password", lb.DPAPIPassword) + d.Set("extnet_id", lb.ExtNetID) + d.Set("frontends", flattenFrontends(lb.Frontends)) + d.Set("gid", lb.GID) + d.Set("guid", lb.GUID) + d.Set("lb_id", lb.ID) + d.Set("image_id", lb.ImageID) + d.Set("milestones", lb.Milestones) + d.Set("name", lb.Name) + d.Set("primary_node", flattenNode(lb.PrimaryNode)) + d.Set("rg_id", lb.RGID) + d.Set("secondary_node", flattenNode(lb.SecondaryNode)) + d.Set("status", lb.Status) + d.Set("tech_status", lb.TechStatus) + d.Set("vins_id", lb.VINSID) +} + +func flattenNode(node lb.Node) []map[string]interface{} { + temp := make([]map[string]interface{}, 0) + n := map[string]interface{}{ + "backend_ip": node.BackendIP, + "compute_id": node.ComputeID, + "frontend_ip": node.FrontendIP, + "guid": node.GUID, + "mgmt_ip": node.MGMTIP, + "network_id": node.NetworkID, + } + + temp = append(temp, n) + + return temp +} + +func flattendBindings(bs []lb.ItemBinding) []map[string]interface{} { + temp := make([]map[string]interface{}, 0, len(bs)) + for _, b := range bs { + t := map[string]interface{}{ + "address": b.Address, + "guid": b.GUID, + "name": b.Name, + "port": b.Port, + } + temp = append(temp, t) + } + return 
temp +} + +func flattenFrontends(fs []lb.ItemFrontend) []map[string]interface{} { + temp := make([]map[string]interface{}, 0, len(fs)) + for _, f := range fs { + t := map[string]interface{}{ + "backend": f.Backend, + "bindings": flattendBindings(f.Bindings), + "guid": f.GUID, + "name": f.Name, + } + temp = append(temp, t) + } + + return temp +} + +func flattenServers(servers []lb.ItemServer) []map[string]interface{} { + temp := make([]map[string]interface{}, 0, len(servers)) + for _, server := range servers { + t := map[string]interface{}{ + "address": server.Address, + "check": server.Check, + "guid": server.GUID, + "name": server.Name, + "port": server.Port, + "server_settings": flattenServerSettings(server.ServerSettings), + } + + temp = append(temp, t) + } + return temp +} + +func flattenServerSettings(defSet lb.ServerSettings) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "downinter": defSet.DownInter, + "fall": defSet.Fall, + "guid": defSet.GUID, + "inter": defSet.Inter, + "maxconn": defSet.MaxConn, + "maxqueue": defSet.MaxQueue, + "rise": defSet.Rise, + "slowstart": defSet.SlowStart, + "weight": defSet.Weight, + } + res = append(res, temp) + return res +} + +func flattenLBBackends(backends []lb.ItemBackend) []map[string]interface{} { + temp := make([]map[string]interface{}, 0, len(backends)) + for _, item := range backends { + t := map[string]interface{}{ + "algorithm": item.Algorithm, + "guid": item.GUID, + "name": item.Name, + "server_default_settings": flattenServerSettings(item.ServerDefaultSettings), + "servers": flattenServers(item.Servers), + } + temp = append(temp, t) + } + return temp +} + +func flattenLBList(lbl *lb.ListLB) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(lbl.Data)) + for _, lb := range lbl.Data { + temp := map[string]interface{}{ + "ha_mode": lb.HAMode, + "acl": flattenACl(lb.ACL), + "backends": flattenLBBackends(lb.Backends), + "created_by": 
lb.CreatedBy, + "created_time": lb.CreatedTime, + "deleted_by": lb.DeletedBy, + "deleted_time": lb.DeletedTime, + "desc": lb.Description, + "dp_api_user": lb.DPAPIUser, + "dp_api_password": lb.DPAPIPassword, + "extnet_id": lb.ExtNetID, + "frontends": flattenFrontends(lb.Frontends), + "gid": lb.GID, + "guid": lb.GUID, + "lb_id": lb.ID, + "milestones": lb.Milestones, + "name": lb.Name, + "primary_node": flattenNode(lb.PrimaryNode), + "rg_id": lb.RGID, + "rg_name": lb.RGName, + "secondary_node": flattenNode(lb.SecondaryNode), + "status": lb.Status, + "tech_status": lb.TechStatus, + "updated_by": lb.UpdatedBy, + "updated_time": lb.UpdatedTime, + "vins_id": lb.VINSID, + } + res = append(res, temp) + } + return res +} + +func flattenACl(m interface{}) string { + switch d := m.(type) { + case string: + return d + case int: + return strconv.Itoa(d) + case int64: + return strconv.FormatInt(d, 10) + case float64: + return strconv.FormatInt(int64(d), 10) + default: + return "" + } +} diff --git a/internal/service/cloudbroker/lb/lb_data_subresource.go b/internal/service/cloudbroker/lb/lb_data_subresource.go new file mode 100644 index 0000000..3d0341d --- /dev/null +++ b/internal/service/cloudbroker/lb/lb_data_subresource.go @@ -0,0 +1,177 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func dsLBSchemaMake() map[string]*schema.Schema { + sch := createLBSchema() + sch["lb_id"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + return sch +} + + +func dsLBListDeletedSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ID", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "account_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by Account ID", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by RG ID", + }, + "tech_status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by TechStatus", + }, + "front_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by FrontIP", + }, + "back_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by BackIP", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: createLBListSchema(), + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } +} + +func dsLBListSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by ID", + }, + 
"name": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by name", + }, + "account_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by Account ID", + }, + "rg_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Filter by RG ID", + }, + "tech_status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by TechStatus", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by Status", + }, + "front_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by FrontIP", + }, + "back_ip": { + Type: schema.TypeString, + Optional: true, + Description: "Filter by BackIP", + }, + "includedeleted": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: createLBListSchema(), + }, + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/lb/lb_resource_subresource.go b/internal/service/cloudbroker/lb/lb_resource_subresource.go new file mode 100644 index 0000000..7a82cf5 --- /dev/null +++ b/internal/service/cloudbroker/lb/lb_resource_subresource.go @@ -0,0 +1,95 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func lbResourceSchemaMake() map[string]*schema.Schema { + sch := createLBSchema() + sch["rg_id"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + + sch["name"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + + sch["extnet_id"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + + sch["vins_id"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + + sch["start"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + sch["desc"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + } + + sch["enable"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + sch["restart"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + sch["restore"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + sch["config_reset"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + sch["permanently"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + return sch +} diff --git a/internal/service/cloudbroker/lb/lb_schema.go b/internal/service/cloudbroker/lb/lb_schema.go new file mode 100644 index 0000000..9f30ca8 --- /dev/null +++ b/internal/service/cloudbroker/lb/lb_schema.go @@ -0,0 +1,702 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func createLBSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "ha_mode": { + Type: schema.TypeBool, + Computed: true, + }, + "ckey": { + Type: schema.TypeString, + Computed: true, + }, + "meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "backends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "server_default_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"downinter": { + Type: schema.TypeInt, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "check": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "server_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "downinter": { + Type: schema.TypeInt, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dp_api_user": { + Type: schema.TypeString, + Computed: true, + }, + "dp_api_password": { + Type: schema.TypeString, + Computed: true, + }, + "extnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + 
"frontends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend": { + Type: schema.TypeString, + Computed: true, + }, + "bindings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "lb_id": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "primary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "mgmt_ip": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "secondary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontend_ip": { + Type: schema.TypeString, + Computed: true, + 
}, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "mgmt_ip": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + } +} + +func createLBListSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "ha_mode": { + Type: schema.TypeBool, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "backends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "server_default_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "downinter": { + Type: schema.TypeInt, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "check": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + 
Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "server_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "downinter": { + Type: schema.TypeInt, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dp_api_user": { + Type: schema.TypeString, + Computed: true, + }, + "dp_api_password": { + Type: schema.TypeString, + Computed: true, + }, + "extnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend": { + Type: schema.TypeString, + Computed: true, + }, + "bindings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "guid": { + 
Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "lb_id": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "primary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "mgmt_ip": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "secondary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "mgmt_ip": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + } +} \ No newline at end of file diff 
--git a/internal/service/cloudbroker/lb/resource_check_input_values.go b/internal/service/cloudbroker/lb/resource_check_input_values.go new file mode 100644 index 0000000..cbc4dc4 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_check_input_values.go @@ -0,0 +1,68 @@ +package lb + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existLBID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + lbId := uint64(d.Get("lb_id").(int)) + + req := lb.ListRequest{} + + lbList, err := c.CloudBroker().LB().List(ctx, req) + if err != nil { + return false, err + } + + return len(lbList.FilterByID(lbId).Data) != 0, nil +} + +func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + rgId := uint64(d.Get("rg_id").(int)) + + req := rg.ListRequest{} + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return false, err + } + + return len(rgList.FilterByID(rgId).Data) != 0, nil +} + +func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + extNetID := uint64(d.Get("extnet_id").(int)) + + req := extnet.ListRequest{} + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return false, err + } + + return len(extNetList.FilterByID(extNetID).Data) != 0, nil +} + +func existViNSID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + vinsID := 
uint64(d.Get("vins_id").(int)) + + req := vins.ListRequest{} + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return false, err + } + + return len(vinsList.FilterByID(vinsID).Data) != 0, nil +} diff --git a/internal/service/cloudbroker/lb/resource_lb.go b/internal/service/cloudbroker/lb/resource_lb.go new file mode 100644 index 0000000..4196b5d --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb.go @@ -0,0 +1,454 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBCreate") + + haveRGID, err := existRGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveRGID { + return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) + } + + haveExtNetID, err := existExtNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveExtNetID { + return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) + } + + haveVins, err := existViNSID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveVins { + return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) + } + + c := m.(*controller.ControllerCfg) + req := lb.CreateRequest{ + Name: d.Get("name").(string), + RGID: uint64(d.Get("rg_id").(int)), + ExtNetID: uint64(d.Get("extnet_id").(int)), + VINSID: uint64(d.Get("vins_id").(int)), + } + if start, ok := d.GetOk("start"); ok { + req.Start = start.(bool) + } + if desc, ok := d.GetOk("desc"); ok { + req.Description = desc.(string) + } + + lbId, err := c.CloudBroker().LB().Create(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + 
d.SetId(strconv.FormatUint(lbId, 10)) + d.Set("lb_id", lbId) + + _, err = utilityLBCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if enable, ok := d.GetOk("enable"); ok { + lbId := uint64(d.Get("lb_id").(int)) + + if enable.(bool) { + req := lb.EnableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Enable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + req := lb.DisableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Disable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + return resourceLBRead(ctx, d, m) +} + +func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBRead") + + c := m.(*controller.ControllerCfg) + + lbRec, err := utilityLBCheckPresence(ctx, d, m) + if lbRec == nil { + d.SetId("") + return diag.FromErr(err) + } + + hasChanged := false + + switch lbRec.Status { + case status.Modeled: + return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status) + case status.Creating: + case status.Created: + case status.Deleting: + case status.Deleted: + lbId, _ := strconv.ParseUint(d.Id(), 10, 64) + restoreReq := lb.RestoreRequest{LBID: lbId} + + _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + if enable := d.Get("enable"); enable.(bool) { + req := lb.EnableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Enable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + if start := d.Get("start"); start.(bool) { + if enable := d.Get("enable"); enable.(bool) { + req := lb.StartRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Start(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + return diag.Errorf("To start the LB, please, enable LB first.") + } + } + + hasChanged = true + case status.Destroying: + return diag.Errorf("The LB is in progress with 
status: %s", lbRec.Status) + case status.Destroyed: + d.SetId("") + return resourceLBCreate(ctx, d, m) + case status.Enabled: + case status.Enabling: + case status.Disabling: + case status.Disabled: + log.Debugf("The LB is in status: %s, troubles may occur with update. Please, enable LB first.", lbRec.Status) + case status.Restoring: + } + + if hasChanged { + lbRec, err = utilityLBCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + + flattenLB(d, lbRec) + + return nil +} + +func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBDelete") + + _, err := utilityLBCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := lb.DeleteRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + + _, err = c.CloudBroker().LB().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBUpdate") + c := m.(*controller.ControllerCfg) + + haveRGID, err := existRGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveRGID { + return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) + } + + haveExtNetID, err := existExtNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveExtNetID { + return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) + } + + haveVins, err := existViNSID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveVins { + return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", 
d.Get("vins_id").(int)) + } + + lbRec, err := utilityLBCheckPresence(ctx, d, m) + if lbRec == nil { + d.SetId("") + return diag.FromErr(err) + } + + hasChanged := false + + switch lbRec.Status { + case status.Modeled: + return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status) + case status.Creating: + case status.Created: + case status.Deleting: + case status.Deleted: + lbId, _ := strconv.ParseUint(d.Id(), 10, 64) + restoreReq := lb.RestoreRequest{LBID: lbId} + + _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + if enable := d.Get("enable"); enable.(bool) { + req := lb.EnableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Enable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + if start := d.Get("start"); start.(bool) { + if enable := d.Get("enable"); enable.(bool) { + req := lb.StartRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Start(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + return diag.Errorf("To start the LB, please, enable LB first.") + } + } + + hasChanged = true + case status.Destroying: + return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) + case status.Destroyed: + d.SetId("") + return resourceLBCreate(ctx, d, m) + case status.Enabled: + case status.Enabling: + case status.Disabling: + case status.Disabled: + log.Debugf("The LB is in status: %s, troubles may occur with update. 
Please, enable LB first.", lbRec.Status) + case status.Restoring: + } + + if hasChanged { + _, err = utilityLBCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + + if d.HasChange("enable") { + enable := d.Get("enable").(bool) + if enable { + req := lb.EnableRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + _, err := c.CloudBroker().LB().Enable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + req := lb.DisableRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + _, err := c.CloudBroker().LB().Disable(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + } + } + + if d.HasChange("start") { + start := d.Get("start").(bool) + lbId := uint64(d.Get("lb_id").(int)) + if start { + req := lb.StartRequest{LBID: lbId} + _, err := c.CloudBroker().LB().Start(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + req := lb.StopRequest{LBID: lbId} + _, err := c.CloudBroker().LB().Stop(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("desc") { + req := lb.UpdateRequest{ + LBID: uint64(d.Get("lb_id").(int)), + Description: d.Get("desc").(string), + } + + _, err := c.CloudBroker().LB().Update(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("restart") { + restart := d.Get("restart").(bool) + if restart { + req := lb.RestartRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + + _, err := c.CloudBroker().LB().Restart(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("restore") { + restore := d.Get("restore").(bool) + if restore { + req := lb.RestoreRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + + _, err := c.CloudBroker().LB().Restore(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("config_reset") { + cfgReset := d.Get("config_reset").(bool) + if cfgReset { + req := lb.ConfigResetRequest{ + LBID: uint64(d.Get("lb_id").(int)), + } + + _, err := 
c.CloudBroker().LB().ConfigReset(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } + + return resourceLBRead(ctx, d, m) +} + +func ResourceLB() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceLBCreate, + ReadContext: resourceLBRead, + UpdateContext: resourceLBUpdate, + DeleteContext: resourceLBDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: lbResourceSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/lb/resource_lb_backend.go b/internal/service/cloudbroker/lb/resource_lb_backend.go new file mode 100644 index 0000000..5d8385d --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_backend.go @@ -0,0 +1,374 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendCreate") + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + c := m.(*controller.ControllerCfg) + req := lb.BackendCreateRequest{} + + req.BackendName = d.Get("name").(string) + req.LBID = uint64(d.Get("lb_id").(int)) + + if algorithm, ok := d.GetOk("algorithm"); ok { + req.Algorithm = algorithm.(string) + } + if inter, ok := d.GetOk("inter"); ok { + req.Inter = uint64(inter.(int)) + } + if downinter, ok := d.GetOk("downinter"); ok { + req.DownInter = uint64(downinter.(int)) + } + if rise, ok := d.GetOk("rise"); ok { + req.Rise = uint64(rise.(int)) + } + if fall, ok := d.GetOk("fall"); ok { + req.Fall = uint64(fall.(int)) + } + if slowstart, ok := d.GetOk("slowstart"); ok { + req.SlowStart = uint64(slowstart.(int)) + } + if maxconn, ok := d.GetOk("maxconn"); ok { + req.MaxConn = uint64(maxconn.(int)) + } + if maxqueue, ok := d.GetOk("maxqueue"); ok { + req.MaxQueue = uint64(maxqueue.(int)) + } + if weight, ok := d.GetOk("weight"); ok { + req.Weight = uint64(weight.(int)) + } + + _, err = 
c.CloudBroker().LB().BackendCreate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) + + _, err = utilityLBBackendCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBBackendRead(ctx, d, m) +} + +func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendRead") + + b, err := utilityLBBackendCheckPresence(ctx, d, m) + if b == nil { + d.SetId("") + return diag.FromErr(err) + } + + lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) + + flattenResourceLBBackend(d, b, lbId) + + return nil +} + +func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendDelete") + + _, err := utilityLBBackendCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := lb.BackendDeleteRequest{ + LBID: uint64(d.Get("lb_id").(int)), + BackendName: d.Get("name").(string), + } + + _, err = c.CloudBroker().LB().BackendDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendEdit") + c := m.(*controller.ControllerCfg) + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + req := lb.BackendUpdateRequest{ + LBID: uint64(d.Get("lb_id").(int)), + BackendName: d.Get("name").(string), + } + + if d.HasChange("algorithm") { + req.Algorithm = d.Get("algorithm").(string) + } + if d.HasChange("inter") { + req.Inter = uint64(d.Get("inter").(int)) + } + if 
d.HasChange("downinter") { + req.DownInter = uint64(d.Get("downinter").(int)) + } + if d.HasChange("rise") { + req.Rise = uint64(d.Get("rise").(int)) + } + if d.HasChange("fall") { + req.Fall = uint64(d.Get("fall").(int)) + } + if d.HasChange("slowstart") { + req.SlowStart = uint64(d.Get("slowstart").(int)) + } + if d.HasChange("maxconn") { + req.MaxConn = uint64(d.Get("maxconn").(int)) + } + if d.HasChange("maxqueue") { + req.MaxQueue = uint64(d.Get("maxqueue").(int)) + } + if d.HasChange("weight") { + req.Weight = uint64(d.Get("weight").(int)) + } + + _, err = c.CloudBroker().LB().BackendUpdate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBBackendRead(ctx, d, m) +} + +func ResourceLBBackend() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceLBBackendCreate, + ReadContext: resourceLBBackendRead, + UpdateContext: resourceLBBackendUpdate, + DeleteContext: resourceLBBackendDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: map[string]*schema.Schema{ + "lb_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the LB instance to backendCreate", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Must be unique among all backends of this LB - name of the new backend to create", + }, + "algorithm": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false), + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "downinter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Optional: true, + Computed: true, 
+ }, + "inter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "servers": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "check": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "server_settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "downinter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/resource_lb_backend_server.go 
b/internal/service/cloudbroker/lb/resource_lb_backend_server.go new file mode 100644 index 0000000..5bd14cb --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_backend_server.go @@ -0,0 +1,311 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendServerCreate") + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + c := m.(*controller.ControllerCfg) + req := lb.BackendServerAddRequest{ + BackendName: d.Get("backend_name").(string), + ServerName: d.Get("name").(string), + Address: d.Get("address").(string), + LBID: uint64(d.Get("lb_id").(int)), + Port: uint64(d.Get("port").(int)), + } + + if check, ok := d.GetOk("check"); ok { + req.Check = check.(string) + } + if inter, ok := d.GetOk("inter"); ok { + req.Inter = uint64(inter.(int)) + } + if downinter, ok := d.GetOk("downinter"); ok { + req.DownInter = uint64(downinter.(int)) + } + if rise, ok := d.GetOk("rise"); ok { + req.Rise = uint64(rise.(int)) + } + if fall, ok := d.GetOk("fall"); ok { + req.Fall = uint64(fall.(int)) + } + if slowstart, ok := d.GetOk("slowstart"); ok { + req.SlowStart = uint64(slowstart.(int)) + } + if maxconn, ok := d.GetOk("maxconn"); ok { + req.MaxConn = uint64(maxconn.(int)) + } + if maxqueue, ok := d.GetOk("maxqueue"); ok { + req.MaxQueue = uint64(maxqueue.(int)) + } + if 
weight, ok := d.GetOk("weight"); ok { + req.Weight = uint64(weight.(int)) + } + + _, err = c.CloudBroker().LB().BackendServerAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string)) + + _, err = utilityLBBackendServerCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBBackendServerRead(ctx, d, m) +} + +func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendServerRead") + + s, err := utilityLBBackendServerCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) + backendName := strings.Split(d.Id(), "#")[1] + + flattenResourceLBBackendServer(d, s, lbId, backendName) + + return nil +} + +func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendServerDelete") + + _, err := utilityLBBackendServerCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := lb.BackendServerDeleteRequest{ + LBID: uint64(d.Get("lb_id").(int)), + BackendName: d.Get("backend_name").(string), + ServerName: d.Get("name").(string), + } + + _, err = c.CloudBroker().LB().BackendServerDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + + return nil +} + +func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBBackendServerEdit") + c := m.(*controller.ControllerCfg) + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or 
does not exist", d.Get("lb_id").(int)) + } + + req := lb.BackendServerUpdateRequest{ + BackendName: d.Get("backend_name").(string), + LBID: uint64(d.Get("lb_id").(int)), + ServerName: d.Get("name").(string), + Address: d.Get("address").(string), + Port: uint64(d.Get("port").(int)), + } + + if d.HasChange("check") { + req.Check = d.Get("check").(string) + } + if d.HasChange("inter") { + req.Inter = uint64(d.Get("inter").(int)) + } + if d.HasChange("downinter") { + req.DownInter = uint64(d.Get("downinter").(int)) + } + if d.HasChange("rise") { + req.Rise = uint64(d.Get("rise").(int)) + } + if d.HasChange("fall") { + req.Fall = uint64(d.Get("fall").(int)) + } + if d.HasChange("slowstart") { + req.SlowStart = uint64(d.Get("slowstart").(int)) + } + if d.HasChange("maxconn") { + req.MaxConn = uint64(d.Get("maxconn").(int)) + } + if d.HasChange("maxqueue") { + req.MaxQueue = uint64(d.Get("maxqueue").(int)) + } + if d.HasChange("weight") { + req.Weight = uint64(d.Get("weight").(int)) + } + + _, err = c.CloudBroker().LB().BackendServerUpdate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBBackendServerRead(ctx, d, m) +} + +func ResourceLBBackendServer() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceLBBackendServerCreate, + ReadContext: resourceLBBackendServerRead, + UpdateContext: resourceLBBackendServerUpdate, + DeleteContext: resourceLBBackendServerDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: map[string]*schema.Schema{ + "lb_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the LB instance to backendCreate", + }, + "backend_name": { + Type: schema.TypeString, + Required: true, + Description: 
"Must be unique among all backends of this LB - name of the new backend to create", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Must be unique among all servers defined for this backend - name of the server definition to add.", + }, + "address": { + Type: schema.TypeString, + Required: true, + Description: "IP address of the server.", + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: "Port number on the server", + }, + "check": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false), + Description: "set to disabled if this server should be used regardless of its state.", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "downinter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "inter": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxconn": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "maxqueue": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "slowstart": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend.go b/internal/service/cloudbroker/lb/resource_lb_frontend.go new file mode 100644 index 0000000..20cd94b --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_frontend.go @@ -0,0 +1,191 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendCreate") + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + c := m.(*controller.ControllerCfg) + req := lb.FrontendCreateRequest{ + BackendName: d.Get("backend_name").(string), + LBID: uint64(d.Get("lb_id").(int)), + FrontendName: d.Get("name").(string), + } + + _, err = c.CloudBroker().LB().FrontendCreate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + 
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) + + _, err = utilityLBFrontendCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBFrontendRead(ctx, d, m) +} + +func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendRead") + + f, err := utilityLBFrontendCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) + + flattenLBFrontend(d, f, lbId) + + return nil +} + +func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendDelete") + + _, err := utilityLBFrontendCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := lb.FrontendDeleteRequest{ + LBID: uint64(d.Get("lb_id").(int)), + FrontendName: d.Get("name").(string), + } + + _, err = c.CloudBroker().LB().FrontendDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceLBFrontendEdit(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return nil +} + +func ResourceLBFrontend() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceLBFrontendCreate, + ReadContext: resourceLBFrontendRead, + UpdateContext: resourceLBFrontendEdit, + DeleteContext: resourceLBFrontendDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: map[string]*schema.Schema{ + "lb_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the LB 
instance to backendCreate", + }, + "backend_name": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "bindings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go new file mode 100644 index 0000000..58ac1a9 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go @@ -0,0 +1,208 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendBindCreate") + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + c := m.(*controller.ControllerCfg) + req := lb.FrontendBindRequest{ + LBID: uint64(d.Get("lb_id").(int)), + FrontendName: d.Get("frontend_name").(string), + BindingName: d.Get("name").(string), + BindingAddress: d.Get("address").(string), + BindingPort: uint64(d.Get("port").(int)), + } + + _, err = c.CloudBroker().LB().FrontendBind(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string)) + + _, err = utilityLBFrontendBindCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBFrontendBindRead(ctx, d, m) +} + +func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendBindRead") + + b, err := utilityLBFrontendBindCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) + 
frontendName := strings.Split(d.Id(), "#")[1] + + flattenLBFrontendBind(d, b, lbId, frontendName) + + return nil +} + +func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendBindDelete") + + _, err := utilityLBFrontendBindCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := lb.FrontendBindDeleteRequest{ + LBID: uint64(d.Get("lb_id").(int)), + FrontendName: d.Get("frontend_name").(string), + BindingName: d.Get("name").(string), + } + + _, err = c.CloudBroker().LB().FrontendBindDelete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceLBFrontendBindEdit") + c := m.(*controller.ControllerCfg) + + haveLBID, err := existLBID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + if !haveLBID { + return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + } + + req := lb.FrontendBindUpdateRequest{ + FrontendName: d.Get("frontend_name").(string), + BindingName: d.Get("name").(string), + LBID: uint64(d.Get("lb_id").(int)), + } + + if d.HasChange("address") || d.HasChange("port") { + req.BindingAddress = d.Get("address").(string) + req.BindingPort = uint64(d.Get("port").(int)) + } + + _, err = c.CloudBroker().LB().FrontendBindUpdate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + return resourceLBFrontendBindRead(ctx, d, m) +} + +func ResourceLBFrontendBind() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceLBFrontendBindCreate, + ReadContext: resourceLBFrontendBindRead, + UpdateContext: resourceLBFrontendBindUpdate, + DeleteContext: resourceLBFrontendBindDelete, + + Importer: 
&schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: map[string]*schema.Schema{ + "lb_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the LB instance to backendCreate", + }, + "frontend_name": { + Type: schema.TypeString, + Required: true, + Description: "Must be unique among all backends of this LB - name of the new backend to create", + }, + "address": { + Type: schema.TypeString, + Required: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/utility_lb.go b/internal/service/cloudbroker/lb/utility_lb.go new file mode 100644 index 0000000..21ad177 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. 
+ +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityLBCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.RecordLB, error) { + c := m.(*controller.ControllerCfg) + req := lb.GetRequest{} + + + if d.Id() != "" { + rgId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.LBID = rgId + } else { + req.LBID = uint64(d.Get("lb_id").(int)) + } + + lb, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + return nil, err + } + + return lb, nil +} diff --git a/internal/service/cloudbroker/lb/utility_lb_backend.go b/internal/service/cloudbroker/lb/utility_lb_backend.go new file mode 100644 index 0000000..80ec270 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_backend.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityLBBackendCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ItemBackend, error) { + c := m.(*controller.ControllerCfg) + req := lb.GetRequest{} + bName := d.Get("name").(string) + + if d.Id() != "" { + parameters := strings.Split(d.Id(), "#") + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + bName = parameters[1] + } else { + req.LBID = uint64(d.Get("lb_id").(int)) + } + + lb, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + return nil, err + } + + backends := lb.Backends + for _, b := range backends { + if b.Name == bName { + return &b, nil + } + } + + return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lb.ID) +} diff --git a/internal/service/cloudbroker/lb/utility_lb_backend_server.go b/internal/service/cloudbroker/lb/utility_lb_backend_server.go new file mode 100644 index 0000000..cdbecb2 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_backend_server.go @@ -0,0 +1,88 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityLBBackendServerCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ItemServer, error) { + c := m.(*controller.ControllerCfg) + req := lb.GetRequest{} + + bName := d.Get("backend_name").(string) + sName := d.Get("name").(string) + + if d.Id() != "" { + parameters := strings.Split(d.Id(), "#") + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + bName = parameters[1] + sName = parameters[2] + } else { + req.LBID = uint64(d.Get("lb_id").(int)) + } + + foundLB, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + return nil, err + } + + backend := &lb.ItemBackend{} + backends := foundLB.Backends + for i, b := range backends { + if b.Name == bName { + backend = &backends[i] + break + } + } + + if backend.Name == "" { + return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, foundLB.ID) + } + + 
for _, s := range backend.Servers { + if s.Name == sName { + return &s, nil + } + } + + return nil, fmt.Errorf("can not find server with name: %s for backend: %s for lb: %d", sName, bName, foundLB.ID) +} diff --git a/internal/service/cloudbroker/lb/utility_lb_frontend.go b/internal/service/cloudbroker/lb/utility_lb_frontend.go new file mode 100644 index 0000000..18d4c90 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_frontend.go @@ -0,0 +1,74 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityLBFrontendCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ItemFrontend, error) { + c := m.(*controller.ControllerCfg) + req := lb.GetRequest{} + + fName := d.Get("name").(string) + + if d.Id() != "" { + parameters := strings.Split(d.Id(), "#") + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + fName = parameters[1] + } else { + req.LBID = uint64(d.Get("lb_id").(int)) + } + + foundLB, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + return nil, err + } + + frontends := foundLB.Frontends + for _, f := range frontends { + if f.Name == fName { + return &f, nil + } + } + + return nil, fmt.Errorf("can not find frontend with name: %s for lb: %d", fName, foundLB.ID) +} diff --git a/internal/service/cloudbroker/lb/utility_lb_frontend_bind.go b/internal/service/cloudbroker/lb/utility_lb_frontend_bind.go new file mode 100644 index 0000000..adfb5c4 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_frontend_bind.go @@ -0,0 +1,87 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityLBFrontendBindCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ItemBinding, error) { + c := m.(*controller.ControllerCfg) + req := lb.GetRequest{} + + fName := d.Get("frontend_name").(string) + bName := d.Get("name").(string) + + if d.Id() != "" { + parameters := strings.Split(d.Id(), "#") + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + fName = parameters[1] + bName = parameters[2] + } else { + req.LBID = uint64(d.Get("lb_id").(int)) + } + + foundLB, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + return nil, err + } + + frontend := &lb.ItemFrontend{} + frontends := foundLB.Frontends + for i, f := range frontends { + if f.Name == fName { + frontend = &frontends[i] + break + } + } + if frontend.Name == "" { + return nil, fmt.Errorf("can not find frontend with name: %s for lb: %d", fName, foundLB.ID) + } + + for _, b := range frontend.Bindings { + if b.Name == bName { + return &b, nil + } + } + + return nil, fmt.Errorf("can not find bind with name: %s for frontend: %s for lb: %d", bName, fName, foundLB.ID) +} diff --git a/internal/service/cloudbroker/lb/utility_lb_list.go 
b/internal/service/cloudbroker/lb/utility_lb_list.go new file mode 100644 index 0000000..ca2d904 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_list.go @@ -0,0 +1,100 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityLBListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ListLB, error) { + c := m.(*controller.ControllerCfg) + req := lb.ListRequest{} + + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if account_id, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(account_id.(int)) + } + + if rg_id, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rg_id.(int)) + } + + if tech_status, ok := d.GetOk("tech_status"); ok { + req.TechStatus = tech_status.(string) + } + + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + + if front_ip, ok := d.GetOk("front_ip"); ok { + req.FrontIP = front_ip.(string) + } + + if back_ip, ok := d.GetOk("back_ip"); ok { + req.BackIP = back_ip.(string) + } + + if includedeleted, ok := d.GetOk("includedeleted"); ok { + req.IncludeDeleted = includedeleted.(bool) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityLBListCheckPresence: load lb list") + lbList, err := c.CloudBroker().LB().List(ctx, req) + if err != nil { + return nil, err + } + + return lbList, nil +} diff --git a/internal/service/cloudbroker/lb/utility_lb_list_deleted.go b/internal/service/cloudbroker/lb/utility_lb_list_deleted.go new file mode 100644 index 0000000..75a15f4 --- /dev/null +++ b/internal/service/cloudbroker/lb/utility_lb_list_deleted.go @@ -0,0 +1,92 @@ +/* +Copyright (c) 2019-2023 Digital 
Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package lb + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityLBListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*lb.ListLB, error) { + c := m.(*controller.ControllerCfg) + req := lb.ListDeletedRequest{} + + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if account_id, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(account_id.(int)) + } + + if rg_id, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rg_id.(int)) + } + + if tech_status, ok := d.GetOk("tech_status"); ok { + req.TechStatus = tech_status.(string) + } + + if front_ip, ok 
:= d.GetOk("front_ip"); ok { + req.FrontIP = front_ip.(string) + } + + if back_ip, ok := d.GetOk("back_ip"); ok { + req.BackIP = back_ip.(string) + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityLBListDeletedCheckPresence: load lb list") + lbList, err := c.CloudBroker().LB().ListDeleted(ctx, req) + if err != nil { + return nil, err + } + + return lbList, nil +} diff --git a/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go b/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go index bb927b9..d144eeb 100644 --- a/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go +++ b/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -53,8 +53,9 @@ func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m inte d.Set("description", pcidevice.Description) d.Set("guid", pcidevice.GUID) d.Set("hw_path", pcidevice.HwPath) - d.Set("rg_id", pcidevice.RGID) + d.Set("device_id",pcidevice.ID) d.Set("name", pcidevice.Name) + d.Set("rg_id", pcidevice.RGID) d.Set("stack_id", pcidevice.StackID) d.Set("status", pcidevice.Status) d.Set("system_name", pcidevice.SystemName) diff --git a/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go b/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go index c945f26..d831356 100644 --- a/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go +++ b/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. 
All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -43,7 +43,7 @@ import ( ) func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(pl.Data)) for _, item := range pl.Data { temp := map[string]interface{}{ "ckey": item.CKey, @@ -71,6 +71,7 @@ func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m } d.Set("items", flattenPcideviceList(pcideviceList)) + d.Set("entry_count", pcideviceList.EntryCount) id := uuid.New() d.SetId(id.String()) @@ -78,74 +79,57 @@ func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourcePcideviceItem() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, +func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "by_id", }, "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "hw_path": { - Type: schema.TypeString, - Computed: true, - }, - "device_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Description: "compute_id", }, "name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Optional: true, + Description: "name", }, "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "stack_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Description: "rg_id", }, "status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Optional: true, + Description: "status", }, - 
"system_name": { - Type: schema.TypeString, - Computed: true, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "page size", }, - } -} - -func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ "items": { Type: schema.TypeList, Computed: true, Description: "pcidevice list", Elem: &schema.Resource{ - Schema: dataSourcePcideviceItem(), + Schema: dataSourcePcideviceSchemaMake(), }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry count", + }, } - return rets } diff --git a/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go b/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go new file mode 100644 index 0000000..80f92c8 --- /dev/null +++ b/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go @@ -0,0 +1,88 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package pcidevice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existStackID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + StackID := uint64(d.Get("stack_id").(int)) + RGID := uint64(d.Get("rg_id").(int)) + + req := rg.ListRequest{ + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return false, err + } + + for _, v := range rgList.FilterByID(RGID).Data { + for _, idVM := range v.VMs { + req := compute.GetRequest{ + ComputeID: idVM, + } + checkStackID, err := c.CloudBroker().Compute().Get(ctx, req) + if err != nil { + return false, err + } + if checkStackID.StackID == StackID { + return true, nil + } + } + } + + return false, err +} + +func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + RGID := uint64(d.Get("rg_id").(int)) + req := rg.ListRequest{ + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return false, err + } + + return len(rgList.FilterByID(RGID).Data) != 0, nil +} diff --git a/internal/service/cloudbroker/pcidevice/resource_pcidevice.go b/internal/service/cloudbroker/pcidevice/resource_pcidevice.go index bd80c7a..aafd171 100644 --- a/internal/service/cloudbroker/pcidevice/resource_pcidevice.go +++ b/internal/service/cloudbroker/pcidevice/resource_pcidevice.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. 
All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -42,19 +42,37 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" + ) func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string)) c := m.(*controller.ControllerCfg) - req := pcidevice.CreateRequest{ - Name: d.Get("name").(string), - HWPath: d.Get("hw_path").(string), - RGID: uint64(d.Get("rg_id").(int)), - StackID: uint64(d.Get("stack_id").(int)), + req := pcidevice.CreateRequest{} + + haveRGID, err := existRGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveRGID { + return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) } + haveStackID, err := existStackID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveStackID { + return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int)) + } + + req.StackID = uint64(d.Get("stack_id").(int)) + req.RGID = uint64(d.Get("rg_id").(int)) + req.Name = d.Get("name").(string) + req.HWPath = d.Get("hw_path").(string) + if description, ok := d.GetOk("description"); ok { req.Description = description.(string) } @@ -92,27 +110,24 @@ func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interf return nil } -func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) +func resourcePcideviceUpdate(ctx 
context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) - deviceID, _ := strconv.ParseUint(d.Id(), 10, 64) - req := pcidevice.DeleteRequest{ - DeviceID: deviceID, - Force: d.Get("force").(bool), + haveRGID, err := existRGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveRGID { + return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) } - _, err := c.CloudBroker().PCIDevice().Delete(ctx, req) + haveStackID, err := existStackID(ctx, d, m) if err != nil { return diag.FromErr(err) } + if !haveStackID { + return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int)) + } - d.SetId("") - - return nil -} - -func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { if d.HasChange("enable") { state := d.Get("enable").(bool) c := m.(*controller.ControllerCfg) @@ -130,6 +145,9 @@ func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m inte req := pcidevice.DisableRequest{ DeviceID: uint64(d.Get("device_id").(int)), } + if force, ok := d.GetOk("force"); ok { + req.Force = force.(bool) + } _, err := c.CloudBroker().PCIDevice().Disable(ctx, req) if err != nil { @@ -141,6 +159,38 @@ func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m inte return resourcePcideviceRead(ctx, d, m) } +func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) + + pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged { + return nil + } + + c := m.(*controller.ControllerCfg) + + 
req := pcidevice.DeleteRequest{ + DeviceID: pciDevice.ID, + } + + if force, ok := d.GetOk("force"); ok { + req.Force = force.(bool) + } + + _, err = c.CloudBroker().PCIDevice().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + func resourcePcideviceSchemaMake() map[string]*schema.Schema { return map[string]*schema.Schema{ "ckey": { diff --git a/internal/service/cloudbroker/pcidevice/utility_pcidevice.go b/internal/service/cloudbroker/pcidevice/utility_pcidevice.go index fb006cc..3949c1b 100644 --- a/internal/service/cloudbroker/pcidevice/utility_pcidevice.go +++ b/internal/service/cloudbroker/pcidevice/utility_pcidevice.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -33,6 +33,7 @@ package pcidevice import ( "context" + "fmt" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,11 +47,12 @@ func utilityPcideviceCheckPresence(ctx context.Context, d *schema.ResourceData, } var pcideviceId uint64 - if (d.Get("device_id").(int)) != 0 { - pcideviceId = uint64(d.Get("device_id").(int)) - } else { + + if d.Id() != "" { id, _ := strconv.ParseUint(d.Id(), 10, 64) pcideviceId = id + } else { + pcideviceId = uint64(d.Get("device_id").(int)) } for _, pd := range pcideviceList.Data { @@ -59,5 +61,5 @@ func utilityPcideviceCheckPresence(ctx context.Context, d *schema.ResourceData, } } - return nil, nil + return nil, fmt.Errorf("dataPcideviceRead: can't find Pcidevice because Device_id %d is not allowed or does not exist", d.Get("device_id").(int)) } diff --git a/internal/service/cloudbroker/pcidevice/utility_pcidevice_list.go b/internal/service/cloudbroker/pcidevice/utility_pcidevice_list.go index de7c5d9..2be8d96 100644 --- a/internal/service/cloudbroker/pcidevice/utility_pcidevice_list.go +++ 
b/internal/service/cloudbroker/pcidevice/utility_pcidevice_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/rg/data_source_rg.go b/internal/service/cloudbroker/rg/data_source_rg.go index 2c6b1fc..df8d29b 100644 --- a/internal/service/cloudbroker/rg/data_source_rg.go +++ b/internal/service/cloudbroker/rg/data_source_rg.go @@ -43,39 +43,88 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func flattenResgroup(d *schema.ResourceData, rgData *rg.RecordRG) error { +func flattenResgroup(d *schema.ResourceData, rgData *rg.RecordRG) { log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d", rgData.Name, rgData.ID, rgData.AccountID) - d.SetId(fmt.Sprintf("%d", rgData.ID)) + d.Set("account_id", rgData.AccountID) + d.Set("account_name", rgData.AccountName) + d.Set("acl", flattenRgAcl(rgData.ACL)) + d.Set("cpu_allocation_parameter", rgData.CPUAllocationParameter) + d.Set("cpu_allocation_ratio", rgData.CPUAllocationRatio) + d.Set("created_by", rgData.CreatedBy) + d.Set("created_time", rgData.CreatedTime) + d.Set("def_net_id", rgData.DefNetID) + d.Set("def_net_type", rgData.DefNetType) + d.Set("deleted_by", rgData.DeletedBy) + d.Set("deleted_time", rgData.DeletedTime) + d.Set("desc", rgData.Description) + d.Set("dirty", rgData.Dirty) + d.Set("gid", rgData.GID) + d.Set("guid", rgData.GUID) d.Set("rg_id", rgData.ID) + d.Set("lock_status", rgData.LockStatus) + d.Set("milestones", rgData.Milestones) d.Set("name", rgData.Name) - d.Set("account_name", rgData.AccountName) - d.Set("account_id", rgData.AccountID) - // d.Set("grid_id", rgData.GridID) - d.Set("description", rgData.Description) + d.Set("register_computes", rgData.RegisterComputes) + d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits)) + 
d.Set("resource_types", rgData.ResTypes) + d.Set("secret", rgData.Secret) d.Set("status", rgData.Status) - d.Set("def_net_type", rgData.DefNetType) - d.Set("def_net_id", rgData.DefNetID) + d.Set("uniq_pools", rgData.UniqPools) + d.Set("updated_by", rgData.UpdatedBy) + d.Set("updated_time", rgData.UpdatedTime) + d.Set("vins", rgData.VINS) + d.Set("computes", rgData.VMs) +} - // log.Debugf("flattenResgroup: calling flattenQuota()") - // if err := d.Set("quota", parseQuota(rgData.Resources)); err != nil { - // return err - // } +func flattenRgAcl(rgACLs rg.ListACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, acl := range rgACLs { + temp := map[string]interface{}{ + "explicit": acl.Explicit, + "guid": acl.GUID, + "right": acl.Right, + "status": acl.Status, + "type": acl.Type, + "user_group_id": acl.UserGroupID, + } + + res = append(res, temp) + } + + return res +} + +func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "cu_c": rl.CUC, + "cu_d": rl.CuD, + "cu_dm": rl.CUDM, + "cu_i": rl.CUI, + "cu_m": rl.CUM, + "cu_np": rl.CUNP, + "gpu_units": rl.GPUUnits, + } + res = append(res, temp) + + return res - return nil } func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) - if rg_facts == nil { + rg, err := utilityResgroupCheckPresence(ctx, d, m) + if err != nil { // if empty string is returned from utilityResgroupCheckPresence then there is no // such resource group and err tells so - just return it to the calling party d.SetId("") // ensure ID is empty in this case return diag.FromErr(err) } + d.SetId(fmt.Sprintf("%d", rg.ID)) + flattenResgroup(d, rg) - return diag.FromErr(flattenResgroup(d, rg_facts)) + return nil } func DataSourceResgroup() *schema.Resource { @@ -89,92 +138,203 @@ func DataSourceResgroup() *schema.Resource { 
Default: &constants.Timeout60s, }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the resource group. Names are case sensitive and unique within the context of an account.", - }, - - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the resource group. If this ID is specified, then resource group name is ignored.", - }, - - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account, which this resource group belongs to.", - }, + Schema: dataSourceRgSchemaMake(), + } +} - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "Unique ID of the account, which this resource group belongs to.", +func dataSourceRgSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + }, + "reason": { + Type: schema.TypeString, + Optional: true, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, }, - - "description": { - Type: schema.TypeString, - Computed: true, - Description: "User-defined text description of this resource group.", + }, + "cpu_allocation_parameter": { + Type: schema.TypeString, + Computed: true, + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + 
"created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_type": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dirty": { + Type: schema.TypeBool, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "register_computes": { + Type: schema.TypeBool, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, }, - - /* commented out, as in this version of provider we use default Grid ID - "grid_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Unique ID of the grid, where this resource group is deployed.", + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - */ - - "quota": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: quotaRgSubresourceSchemaMake(), // this is a dictionary - }, - Description: "Quota settings for this resource group.", + }, + "secret": { + Type: 
schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - - "def_net_type": { - Type: schema.TypeString, - Computed: true, - Description: "Type of the default network for this resource group.", + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the default network for this resource group (if any).", + }, + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - /* - "status": { - Type: schema.TypeString, - Computed: true, - Description: "Current status of this resource group.", - }, - - "vins": { - Type: schema.TypeList, // this is a list of ints - Computed: true, - MaxItems: LimitMaxVinsPerResgroup, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of VINs deployed in this resource group.", - }, - - "computes": { - Type: schema.TypeList, //t his is a list of ints - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of computes deployed in this resource group.", - }, - */ }, } } diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..248a9a4 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go @@ -0,0 +1,128 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func DataSourceRgAffinityGroupComputes() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupComputesRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupComputesSchemaMake(), + } +} + +func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgComputes, err := utilityRgAffinityGroupComputesCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgAffinityGroupComputes(rgComputes)) + return nil +} + +func flattenRgAffinityGroupComputes(list rg.ListAffinityGroupCompute) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + + for _, item := range list { + temp := map[string]interface{}{ + "compute_id": item.ComputeID, + "other_node": item.OtherNode, + "other_node_indirect": item.OtherNodeIndirect, + "other_node_indirect_soft": item.OtherNodeIndirectSoft, + "other_node_soft": item.OtherNodeSoft, + "same_node": item.SameNode, + "same_node_soft": item.SameNodeSoft, + } + res = append(res, temp) + } + + return res +} + +func dataSourceRgAffinityGroupComputesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "affinity_group": { + Type: schema.TypeString, + Required: true, + Description: "Affinity group label", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "other_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ 
+ Type: schema.TypeInt, + }, + }, + "other_node_indirect": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "other_node_indirect_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "other_node_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "same_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "same_node_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + } + + return res +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..ec14f17 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go @@ -0,0 +1,60 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func DataSourceRgAffinityGroupsGet() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupsGetRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupsGetSchemaMake(), + } +} + +func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computes, err := utilityRgAffinityGroupsGetCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("ids", computes) + return nil +} + +func dataSourceRgAffinityGroupsGetSchemaMake() map[string]*schema.Schema { + res := 
map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "affinity_group": { + Type: schema.TypeString, + Required: true, + Description: "Affinity group label", + }, + + "ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + } + + return res +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..fba49e0 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go @@ -0,0 +1,34 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func DataSourceRgAffinityGroupsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupsListSchemaMake(), + } +} + +func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return nil +} + +func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + + } +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_audits.go b/internal/service/cloudbroker/rg/data_source_rg_audits.go new file mode 100644 index 0000000..431d326 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_audits.go @@ -0,0 +1,91 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func DataSourceRgAudits() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAuditsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAuditsSchemaMake(), + } +} + +func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgAudits, err := utilityRgAuditsCheckPresence(ctx, d, m) + if err != nil { + diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgAudits(rgAudits)) + return nil +} + +func flattenRgAudits(rgAudits rg.ListAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rgAudit := range rgAudits { + temp := map[string]interface{}{ + "call": rgAudit.Call, + "responsetime": rgAudit.ResponseTime, + "statuscode": rgAudit.StatusCode, + "timestamp": rgAudit.Timestamp, + "user": rgAudit.User, + } + + res = append(res, temp) + } + + return res +} + +func dataSourceRgAuditsSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "call": { + Type: schema.TypeString, + Computed: true, + }, + "responsetime": { + Type: schema.TypeFloat, + Computed: true, + }, + "statuscode": { + Type: schema.TypeInt, + Computed: true, + }, + "timestamp": { + Type: schema.TypeFloat, + Computed: true, + }, + "user": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list.go b/internal/service/cloudbroker/rg/data_source_rg_list.go index 1acca0c..d73f0a5 100644 --- 
a/internal/service/cloudbroker/rg/data_source_rg_list.go +++ b/internal/service/cloudbroker/rg/data_source_rg_list.go @@ -76,38 +76,6 @@ func flattenRgList(rgl *rg.ListRG) []map[string]interface{} { } -func flattenRgAcl(rgAcls rg.ListACL) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, rgAcl := range rgAcls { - temp := map[string]interface{}{ - "explicit": rgAcl.Explicit, - "guid": rgAcl.GUID, - "right": rgAcl.Right, - "status": rgAcl.Status, - "type": rgAcl.Type, - "user_group_id": rgAcl.UserGroupID, - } - res = append(res, temp) - } - return res -} - -func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "cu_c": rl.CUC, - "cu_d": rl.CuD, - "cu_i": rl.CUI, - "cu_m": rl.CUM, - "cu_np": rl.CUNP, - "gpu_units": rl.GPUUnits, - } - res = append(res, temp) - - return res - -} - func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { rgList, err := utilityRgListCheckPresence(ctx, d, m) if err != nil { diff --git a/internal/service/cloudbroker/rg/resource_check_input_values.go b/internal/service/cloudbroker/rg/resource_check_input_values.go new file mode 100644 index 0000000..17b3fd2 --- /dev/null +++ b/internal/service/cloudbroker/rg/resource_check_input_values.go @@ -0,0 +1,61 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + accountId := uint64(d.Get("account_id").(int)) + + req := 
account.ListRequest{} + + accountList, err := c.CloudBroker().Account().List(ctx, req) + if err != nil { + return false, err + } + + return len(accountList.FilterByID(accountId).Data) != 0, nil +} + +func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + gid := uint64(d.Get("gid").(int)) + + gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{}) + if err != nil { + return false, err + } + + for _, elem := range gidList.Data { + if elem.GID == gid { + return true, nil + } + } + + return false, nil +} + +func existExtNet(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + extNetId := uint64(d.Get("ext_net_id").(int)) + + req := extnet.ListRequest{ + AccountID: uint64(d.Get("account_id").(int)), + } + + listExtNet, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return false, err + } + + return len(listExtNet.FilterByID(extNetId).Data) != 0, nil +} diff --git a/internal/service/cloudbroker/rg/resource_rg.go b/internal/service/cloudbroker/rg/resource_rg.go index 6525aa3..8f0db89 100644 --- a/internal/service/cloudbroker/rg/resource_rg.go +++ b/internal/service/cloudbroker/rg/resource_rg.go @@ -40,7 +40,8 @@ import ( "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/location" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -48,101 +49,373 @@ import ( ) func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) 
diag.Diagnostics { - rg_name, arg_set := d.GetOk("name") - if !arg_set { - return diag.FromErr(fmt.Errorf("Cannot create new RG: missing name.")) - } + c := m.(*controller.ControllerCfg) + + req := rg.CreateRequest{} + + req.Name = d.Get("rg_name").(string) + log.Debugf("resourceResgroupCreate: called for RG name %s, account ID %d", - rg_name.(string), d.Get("account_id").(int)) + req.Name, d.Get("account_id").(int)) - set_quota := false - var quota_record QuotaRecord - arg_value, arg_set := d.GetOk("quota") - if arg_set { - log.Debugf("resourceResgroupCreate: setting Quota on RG requested") - quota_record = makeQuotaRecord(arg_value.([]interface{})) - set_quota = true + haveAccount, err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) } + if !haveAccount { + return diag.Errorf("resourceResgroupCreate: can't create RG because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) + } + req.AccountID = uint64(d.Get("account_id").(int)) - c := m.(*controller.ControllerCfg) - log.Debugf("resourceResgroupCreate: called by user %q for RG name %s, account ID %d", - c.GetDecortUsername(), - rg_name.(string), d.Get("account_id").(int)) + haveGID, err := existGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceResgroupCreate: can't create RG because GID %d is not allowed or does not exist", d.Get("gid").(int)) + } + req.GID = uint64(d.Get("gid").(int)) - req := rg.CreateRequest{ - AccountID: uint64(d.Get("account_id").(int)), - Name: rg_name.(string), - GID: uint64(location.DefaultGridID), - Owner: c.GetDecortUsername(), + if _, ok := d.GetOk("ext_net_id"); ok { + haveExtNet, err := existExtNet(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveExtNet { + return diag.Errorf("resourceResgroupCreate: can't create RG because ExtNetID %d is not allowed or does not exist", d.Get("ext_net_id").(int)) + } } - // pass quota values as set - if set_quota { - 
req.MaxCPUCapacity = int64(quota_record.Cpu) - req.MaxVDiskCapacity = int64(quota_record.Disk) - req.MaxMemoryCapacity = int64(quota_record.Ram) - req.MaxNetworkPeerTransfer = int64(quota_record.ExtTraffic) - req.MaxNumPublicIP = int64(quota_record.ExtIPs) - // url_values.Add("???", fmt.Sprintf("%d", quota_record.GpuUnits)) + if resLimits, ok := d.GetOk("resource_limits"); ok { + resLimits := resLimits.([]interface{})[0] + resLimitsConv := resLimits.(map[string]interface{}) + if resLimitsConv["cu_m"] != nil { + maxMemCap := int64(resLimitsConv["cu_m"].(float64)) + if maxMemCap == 0 { + req.MaxMemoryCapacity = -1 + } else { + req.MaxMemoryCapacity = maxMemCap + } + } + if resLimitsConv["cu_dm"] != nil { + maxDiskCap := int64(resLimitsConv["cu_dm"].(float64)) + if maxDiskCap == 0 { + req.MaxVDiskCapacity = -1 + } else { + req.MaxVDiskCapacity = maxDiskCap + } + } + if resLimitsConv["cu_c"] != nil { + maxCPUCap := int64(resLimitsConv["cu_c"].(float64)) + if maxCPUCap == 0 { + req.MaxCPUCapacity = -1 + } else { + req.MaxCPUCapacity = maxCPUCap + } + } + if resLimitsConv["cu_i"] != nil { + maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64)) + if maxNumPublicIP == 0 { + req.MaxNumPublicIP = -1 + } else { + req.MaxNumPublicIP = maxNumPublicIP + } + } + if resLimitsConv["cu_np"] != nil { + maxNP := int64(resLimitsConv["cu_np"].(float64)) + if maxNP == 0 { + req.MaxNetworkPeerTransfer = -1 + } else { + req.MaxNetworkPeerTransfer = maxNP + } + } } - // parse and handle network settings - def_net_type, arg_set := d.GetOk("def_net_type") - if arg_set { - req.DefNet = def_net_type.(string) + if defNetType, ok := d.GetOk("def_net_type"); ok { + req.DefNet = defNetType.(string) } - ipcidr, arg_set := d.GetOk("ipcidr") - if arg_set { + if ipcidr, ok := d.GetOk("ipcidr"); ok { req.IPCIDR = ipcidr.(string) } - ext_net_id, arg_set := d.GetOk("ext_net_id") - if arg_set { - req.ExtNetID = uint64(ext_net_id.(int)) + if description, ok := d.GetOk("description"); ok { + 
req.Description = description.(string) + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + if extNetId, ok := d.GetOk("ext_net_id"); ok { + req.ExtNetID = uint64(extNetId.(int)) + } + + if extIp, ok := d.GetOk("ext_ip"); ok { + req.ExtIP = extIp.(string) + } + + if regComputes, ok := d.GetOk("register_computes"); ok { + req.RegisterComputes = regComputes.(bool) } - ext_ip, arg_set := d.GetOk("ext_ip") - if arg_set { - req.ExtIP = ext_ip.(string) + if uniqPools, ok := d.GetOk("uniq_pools"); ok { + uniqPools := uniqPools.([]interface{}) + + for _, pool := range uniqPools { + req.UniqPools = append(req.UniqPools, pool.(string)) + } } - rgId, err := c.CloudBroker().RG().Create(ctx, req) + rgID, err := c.CloudBroker().RG().Create(ctx, req) if err != nil { return diag.FromErr(err) } - d.SetId(strconv.FormatUint(rgId, 10)) - if !set_quota { - resp, err := utilityResgroupCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) + d.SetId(strconv.FormatUint(rgID, 10)) + + w := dc.Warnings{} + + if access, ok := d.GetOk("access"); ok { + var user, right string + + if access.(*schema.Set).Len() > 0 { + accessList := access.(*schema.Set).List() + for _, accessIface := range accessList { + access := accessIface.(map[string]interface{}) + user = access["user"].(string) + right = access["right"].(string) + + req := rg.AccessGrantRequest{ + RGID: rgID, + User: user, + Right: right, + } + + if reason, ok := access["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().RG().AccessGrant(ctx, req) + if err != nil { + w.Add(err) + } + } + } + } + + if defNet, ok := d.GetOk("def_net"); ok { + if defNet.(*schema.Set).Len() > 0 { + defNetList := defNet.(*schema.Set).List() + defNetItem := defNetList[0].(map[string]interface{}) + + netType := defNetItem["net_type"].(string) + + req := rg.SetDefNetRequest{ + RGID: rgID, + NetType: netType, + } + + if netID, ok := defNetItem["net_id"]; ok { + req.NetID = 
uint64(netID.(int)) + } + if reason, ok := defNetItem["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().RG().SetDefNet(ctx, req) + if err != nil { + w.Add(err) + } + + d.Set("def_net_type", netType) } - d.Set("quota", parseQuota(resp.ResourceLimits)) + if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok { + cpuAllocationParameter := cpuAllocationParameter.(string) + + req := rg.SetCPUAllocationParameterRequest{ + RGID: rgID, + StrictLoose: cpuAllocationParameter, + } + + log.Debugf("setting account cpu allocation parameter") + _, err := c.CloudBroker().RG().SetCPUAllocationParameter(ctx, req) + if err != nil { + w.Add(err) + } + } + + if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok { + cpuAllocationRatio := cpuAllocationRatio.(float64) + + req := rg.SetCPUAllocationRatioRequest{ + RGID: rgID, + Ratio: cpuAllocationRatio, + } + + log.Debugf("setting account cpu allocation ratio") + _, err := c.CloudBroker().RG().SetCPUAllocationRatio(ctx, req) + if err != nil { + w.Add(err) + } + } + + if !d.Get("enable").(bool) { + _, err := c.CloudBroker().RG().Disable(ctx, rg.DisableRequest{ + RGID: rgID, + }) + + if err != nil { + w.Add(err) + } + } } - // re-read newly created RG to make sure schema contains complete and up to date set of specifications - return resourceResgroupRead(ctx, d, m) + diags := resourceResgroupRead(ctx, d, m) + diags = append(diags, w.Get()...) 
+ + return diags } func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupRead: called for RG name %s, account ID %d", - d.Get("name").(string), d.Get("account_id").(int)) + d.Get("rg_name").(string), d.Get("account_id").(int)) - rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) - if rg_facts == nil { + c := m.(*controller.ControllerCfg) + + rgData, err := utilityResgroupCheckPresence(ctx, d, m) + if err != nil { d.SetId("") // ensure ID is empty return diag.FromErr(err) } - return diag.FromErr(flattenResgroup(d, rg_facts)) + hasChanged := false + + switch rgData.Status { + case status.Modeled: + return diag.Errorf("The resource group is in status: %s, please, contact support for more information", rgData.Status) + case status.Created: + case status.Enabled: + case status.Deleted: + restoreReq := rg.RestoreRequest{RGID: rgData.ID} + enableReq := rg.EnableRequest{RGID: rgData.ID} + + log.Debugf("restoring RG") + _, err := c.CloudBroker().RG().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + log.Debugf("enabling RG") + _, err = c.CloudBroker().RG().Enable(context.Background(), enableReq) + if err != nil { + return diag.FromErr(err) + } + + hasChanged = true + case status.Deleting: + case status.Destroyed: + d.SetId("") + return resourceResgroupCreate(ctx, d, m) + case status.Destroying: + case status.Disabled: + case status.Disabling: + case status.Enabled: + case status.Enabling: + } + + if hasChanged { + rgData, err = utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + + flattenResourceRG(d, rgData) + + return nil } func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d", d.Get("name").(string), d.Get("account_id").(int)) + c := m.(*controller.ControllerCfg) + + haveAccount, 
err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveAccount { + return diag.Errorf("resourceResgroupUpdate: can't create RG bacause AccountID %d not allowed or does not exist", d.Get("account_id").(int)) + } + + haveGID, err := existGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceResgroupUpdate: can't create RG because GID %d not allowed or does not exist", d.Get("gid").(int)) + } + + if _, ok := d.GetOk("ext_net_id"); ok { + haveExtNet, err := existExtNet(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveExtNet { + return diag.Errorf("resourceResgroupUpdate: can't create RG bacause ExtNetID %d not allowed or does not exist", d.Get("ext_net_id").(int)) + } + } + + rgData, err := utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + hasChanged := false + + switch rgData.Status { + case status.Modeled: + case status.Created: + case status.Enabled: + case status.Deleted: + restoreReq := rg.RestoreRequest{RGID: rgData.ID} + enableReq := rg.EnableRequest{RGID: rgData.ID} + + _, err := c.CloudBroker().RG().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } + + _, err = c.CloudBroker().RG().Enable(ctx, enableReq) + if err != nil { + return diag.FromErr(err) + } + + hasChanged = true + case status.Deleting: + case status.Destroyed: + d.SetId("") + return resourceResgroupCreate(ctx, d, m) + case status.Destroying: + case status.Disabled: + case status.Disabling: + case status.Enabled: + case status.Enabling: + } + + if hasChanged { + rgData, err = utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + /* NOTE: we do not allow changing the following attributes of an existing RG via terraform: - def_net_type - ipcidr @@ -151,100 +424,218 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter The 
following code fragment checks if any of these have been changed and generates error. */ + if ok := d.HasChange("def_net"); ok { + _, newDefNet := d.GetChange("def_net") + if newDefNet.(*schema.Set).Len() == 0 { + return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty") + } + } + for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} { - attr_new, attr_old := d.GetChange("def_net_type") - if attr_new.(string) != attr_old.(string) { + attrNew, attrOld := d.GetChange(attr) + if attrNew.(string) != attrOld.(string) { return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr)) } } - attr_new, attr_old := d.GetChange("ext_net_id") - if attr_new.(int) != attr_old.(int) { + attrNew, attrOld := d.GetChange("ext_net_id") + if attrNew.(int) != attrOld.(int) { return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id())) } - do_general_update := false // will be true if general RG update is necessary (API rg/update) + doGeneralUpdate := false - c := m.(*controller.ControllerCfg) - rgId, _ := strconv.ParseUint(d.Id(), 10, 64) req := rg.UpdateRequest{ - RGID: rgId, + RGID: rgData.ID, + } + + if d.HasChange("rg_name") { + req.Name = d.Get("rg_name").(string) + doGeneralUpdate = true } - name_new, name_set := d.GetOk("name") - if name_set { - log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.") - name_old, _ := d.GetChange("name") - if name_old.(string) != name_new.(string) { - do_general_update = true - req.Name = name_new.(string) + if d.HasChange("resource_limits") { + resLimits := d.Get("resource_limits").([]interface{})[0] + resLimitsConv := resLimits.(map[string]interface{}) + if resLimitsConv["cu_m"] != nil { + maxMemCap := int64(resLimitsConv["cu_m"].(float64)) + if maxMemCap == 0 { + req.MaxMemoryCapacity = -1 + } else { + req.MaxMemoryCapacity = maxMemCap + } + } + 
if resLimitsConv["cu_dm"] != nil { + maxDiskCap := int64(resLimitsConv["cu_dm"].(float64)) + if maxDiskCap == 0 { + req.MaxVDiskCapacity = -1 + } else { + req.MaxVDiskCapacity = maxDiskCap + } + } + if resLimitsConv["cu_c"] != nil { + maxCPUCap := int64(resLimitsConv["cu_c"].(float64)) + if maxCPUCap == 0 { + req.MaxCPUCapacity = -1 + } else { + req.MaxCPUCapacity = maxCPUCap + } + } + if resLimitsConv["cu_i"] != nil { + maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64)) + if maxNumPublicIP == 0 { + req.MaxNumPublicIP = -1 + } else { + req.MaxNumPublicIP = maxNumPublicIP + } + } + if resLimitsConv["cu_np"] != nil { + maxNP := int64(resLimitsConv["cu_np"].(float64)) + if maxNP == 0 { + req.MaxNetworkPeerTransfer = -1 + } else { + req.MaxNetworkPeerTransfer = maxNP + } } + + doGeneralUpdate = true + } + + if d.HasChange("desc") { + req.Description = d.Get("desc").(string) + doGeneralUpdate = true + } + + if d.HasChange("register_computes") { + req.RegisterComputes = d.Get("register_computes").(bool) + doGeneralUpdate = true } - quota_value, quota_set := d.GetOk("quota") - if quota_set { - log.Debugf("resourceResgroupUpdate: quota specified - looking for deltas from the old quota.") - quotarecord_new := makeQuotaRecord(quota_value.([]interface{})) - quota_value_old, _ := d.GetChange("quota") // returns old as 1st, new as 2nd return value - quotarecord_old := makeQuotaRecord(quota_value_old.([]interface{})) + if d.HasChange("uniq_pools") { + uniqPools := d.Get("uniq_pools").([]interface{}) + for _, pool := range uniqPools { + req.UniqPools = append(req.UniqPools, pool.(string)) + } + + doGeneralUpdate = true + } - if quotarecord_new.Cpu != quotarecord_old.Cpu { - do_general_update = true - log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu) - req.MaxCPUCapacity = int64(quotarecord_new.Cpu) + if doGeneralUpdate { + log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG") + _, 
err := c.CloudBroker().RG().Update(ctx, req) + if err != nil { + return diag.FromErr(err) } + } else { + log.Debugf("resourceResgroupUpdate: no difference between old and new state - no update on the RG will be done") + } - if quotarecord_new.Disk != quotarecord_old.Disk { - do_general_update = true - log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk) - req.MaxVDiskCapacity = int64(quotarecord_new.Disk) + if d.HasChange("enable") { + enable := d.Get("enable").(bool) + + if enable && rgData.Status == status.Disabled { + _, err := c.CloudBroker().RG().Enable(ctx, rg.EnableRequest{RGID: rgData.ID}) + if err != nil { + return diag.FromErr(err) + } + } else if !enable && rgData.Status == status.Enabled { + req := rg.DisableRequest{RGID: rgData.ID} + + _, err := c.CloudBroker().RG().Disable(ctx, req) + if err != nil { + return diag.FromErr(err) + } } + } // убрать в конец + + oldSet, newSet := d.GetChange("access") + + deletedAccess := oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List() + for _, deletedIface := range deletedAccess { + deleteItem := deletedIface.(map[string]interface{}) + user := deleteItem["user"].(string) - if quotarecord_new.Ram != quotarecord_old.Ram { // NB: quota on RAM is stored as float32, in units of MB - do_general_update = true - log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram) - req.MaxMemoryCapacity = int64(quotarecord_new.Ram) + _, err := c.CloudBroker().RG().AccessRevoke(ctx, rg.AccessRevokeRequest{ + RGID: rgData.ID, + User: user, + }) + if err != nil { + return diag.FromErr(err) } + } - if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic { - do_general_update = true - log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic) - req.MaxNetworkPeerTransfer = int64(quotarecord_new.ExtTraffic) + addedAccess := newSet.(*schema.Set).Difference(oldSet.(*schema.Set)).List() 
+ for _, addedIface := range addedAccess { + addedItem := addedIface.(map[string]interface{}) + user := addedItem["user"].(string) + right := addedItem["right"].(string) + + _, err := c.CloudBroker().RG().AccessGrant(ctx, rg.AccessGrantRequest{ + RGID: rgData.ID, + User: user, + Right: right, + }) + if err != nil { + return diag.FromErr(err) } + } - if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs { - do_general_update = true - log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs) - req.MaxNumPublicIP = int64(quotarecord_new.ExtIPs) + if ok := d.HasChange("def_net"); ok { + oldDefNet, newDefNet := d.GetChange("def_net") + if newDefNet.(*schema.Set).Len() > 0 { + changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List() + for _, changedDefNetInterface := range changedDefNet { + defNetItem := changedDefNetInterface.(map[string]interface{}) + netType := defNetItem["net_type"].(string) + + req := rg.SetDefNetRequest{ + RGID: rgData.ID, + NetType: netType, + } + + if netID, ok := defNetItem["net_id"]; ok { + req.NetID = uint64(netID.(int)) + } + if reason, ok := defNetItem["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().RG().SetDefNet(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } } } - desc_new, desc_set := d.GetOk("description") - if desc_set { - log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.") - desc_old, _ := d.GetChange("description") - if desc_old.(string) != desc_new.(string) { - do_general_update = true - req.Description = desc_new.(string) + if d.HasChange("cpu_allocation_parameter") { + cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) + _, err := c.CloudBroker().RG().SetCPUAllocationParameter(ctx, rg.SetCPUAllocationParameterRequest{ + RGID: rgData.ID, + StrictLoose: cpuAllocationParameter, + }) + if err != nil { + return diag.FromErr(err) } } - if 
do_general_update { - log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG") - _, err := c.CloudBroker().RG().Update(ctx, req) + if d.HasChange("cpu_allocation_ratio") { + cpuAllocationRatio := d.Get("cpu_allocation_ratio").(float64) + _, err := c.CloudBroker().RG().SetCPUAllocationRatio(ctx, rg.SetCPUAllocationRatioRequest{ + RGID: rgData.ID, + Ratio: cpuAllocationRatio, + }) if err != nil { return diag.FromErr(err) } - } else { - log.Debugf("resourceResgroupUpdate: no difference between old and new state - no update on the RG will be done") } - return resourceResgroupRead(ctx, d, m) + return nil // убери } func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d", - d.Get("name").(string), d.Get("account_id").(int)) + d.Get("rg_name").(string), d.Get("account_id").(int)) rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) if rg_facts == nil { @@ -285,19 +676,13 @@ func ResourceResgroup() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: &constants.Timeout180s, - Read: &constants.Timeout30s, + Read: &constants.Timeout600s, Update: &constants.Timeout180s, Delete: &constants.Timeout60s, Default: &constants.Timeout60s, }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this resource group. 
Names are case sensitive and unique within the context of a account.", - }, - "account_id": { Type: schema.TypeInt, Required: true, @@ -305,26 +690,90 @@ func ResourceResgroup() *schema.Resource { Description: "Unique ID of the account, which this resource group belongs to.", }, + "gid": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Unique ID of the grid, where this resource group is deployed.", + }, + + "rg_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.", + }, + + "resource_limits": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_dm": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "def_net_type": { - Type: schema.TypeString, - Optional: true, - Default: "PRIVATE", + Type: schema.TypeString, + Optional: true, + Computed: true, + // Default: "PRIVATE", ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.", }, - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the default network for this resource group (if any).", - }, - "ipcidr": { Type: schema.TypeString, Optional: true, Description: "Address of the netowrk inside the private network segment (aka ViNS) if 
def_net_type=PRIVATE", }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "User-defined text description of this resource group.", + }, + + "reason": { + Type: schema.TypeString, + Optional: true, + }, + "ext_net_id": { Type: schema.TypeInt, Optional: true, @@ -338,32 +787,126 @@ func ResourceResgroup() *schema.Resource { Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0", }, - /* commented out, as in this version of provider we use default Grid ID - "grid_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, // if 0 is passed, default Grid ID will be used - // DefaultFunc: utilityResgroupGetDefaultGridID, - ForceNew: true, // change of Grid ID will require new RG - Description: "Unique ID of the grid, where this resource group is deployed.", + "register_computes": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + // Default: false, + Description: "Register computes in registration system", }, - */ - "quota": { + "uniq_pools": { Type: schema.TypeList, - Optional: true, Computed: true, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "access": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "user": { + Type: schema.TypeString, + Required: true, + Description: "User or group name to grant access", + }, + "right": { + Type: schema.TypeString, + Required: true, + Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "Reason for action", + }, + }, + }, + }, + + "def_net": { + Type: schema.TypeSet, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ - Schema: quotaRgSubresourceSchemaMake(), + Schema: map[string]*schema.Schema{ + "net_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"PRIVATE", 
"PUBLIC"}, false), + Description: "Network type to set. Must be on of 'PRIVATE' or 'PUBLIC'.", + }, + "net_id": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "Reason for action", + }, + }, }, - Description: "Quota settings for this resource group.", }, - "description": { + "cpu_allocation_parameter": { Type: schema.TypeString, Optional: true, - Description: "User-defined text description of this resource group.", + Computed: true, + Description: "set cpu allocation parameter", + }, + + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: "set cpu allocation ratio", + }, + + "enable": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "enable/disable account", + }, + + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, "account_name": { @@ -371,33 +914,107 @@ func ResourceResgroup() *schema.Resource { Computed: true, Description: "Name of the account, which this resource group belongs to.", }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, - /* - "status": { - Type: 
schema.TypeString, - Computed: true, - Description: "Current status of this resource group.", - }, + "def_net_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the default network for this resource group (if any).", + }, - "vins": { - Type: schema.TypeList, // this is a list of ints - Computed: true, - MaxItems: LimitMaxVinsPerResgroup, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of VINs deployed in this resource group.", + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - - "computes": { - Type: schema.TypeList, // this is a list of ints - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of computes deployed in this resource group.", + }, + "secret": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Current status of this resource group.", + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - */ + Description: "List of VINs deployed in this resource group.", + }, }, } } + +func flattenResourceRG(d *schema.ResourceData, rgData *rg.RecordRG) { + d.Set("account_id", rgData.AccountID) + d.Set("gid", rgData.GID) + d.Set("rg_name", rgData.Name) + d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits)) + d.Set("def_net_type", rgData.DefNetType) + 
d.Set("description", rgData.Description) + d.Set("register_computes", rgData.RegisterComputes) + d.Set("uniq_pools", rgData.UniqPools) + d.Set("cpu_allocation_parameter", rgData.CPUAllocationParameter) + d.Set("cpu_allocation_ratio", rgData.CPUAllocationRatio) + d.Set("acl", flattenRgAcl(rgData.ACL)) + d.Set("account_name", rgData.AccountName) + d.Set("created_by", rgData.CreatedBy) + d.Set("created_time", rgData.CreatedTime) + d.Set("def_net_id", rgData.DefNetID) + d.Set("deleted_by", rgData.DeletedBy) + d.Set("deleted_time", rgData.DeletedTime) + d.Set("guid", rgData.GUID) + d.Set("rg_id", rgData.ID) + d.Set("lock_status", rgData.LockStatus) + d.Set("milestones", rgData.Milestones) + d.Set("resource_types", rgData.ResTypes) + d.Set("secret", rgData.Secret) + d.Set("status", rgData.Status) + d.Set("updated_by", rgData.UpdatedBy) + d.Set("updated_time", rgData.UpdatedTime) + d.Set("vins", rgData.VINS) +} diff --git a/internal/service/cloudbroker/rg/utility_rg.go b/internal/service/cloudbroker/rg/utility_rg.go index 1b75486..e4743f3 100644 --- a/internal/service/cloudbroker/rg/utility_rg.go +++ b/internal/service/cloudbroker/rg/utility_rg.go @@ -33,76 +33,31 @@ package rg import ( "context" - "fmt" "strconv" - log "github.com/sirupsen/logrus" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*rg.RecordRG, error) { c := m.(*controller.ControllerCfg) + req := rg.GetRequest{} - idSet := false - theId, err := strconv.ParseUint(d.Id(), 10, 64) - if err != nil || theId <= 0 { - rgId, argSet := d.GetOk("rg_id") - if argSet { - theId = uint64(rgId.(int)) - idSet = true - } + if d.Id() != "" { + rgId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.RGID = rgId } 
else { - idSet = true + req.RGID = uint64(d.Get("rg_id").(int)) } - - if idSet { - log.Debugf("utilityResgroupCheckPresence: locating RG by its ID %d", theId) - req := rg.GetRequest{ - RGID: theId, - } - - rgFacts, err := c.CloudBroker().RG().Get(ctx, req) - if err != nil { - return nil, err - } - return rgFacts, nil - } - - rgName, argSet := d.GetOk("name") - if !argSet { - return nil, fmt.Errorf("Cannot check resource group presence if name is empty and no resource group ID specified") + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) } - listReq := rg.ListRequest{ - IncludeDeleted: false, - } - model, err := c.CloudBroker().RG().List(ctx, listReq) + rgData, err := c.CloudBroker().RG().Get(ctx, req) if err != nil { return nil, err } - log.Debugf("utilityResgroupCheckPresence: traversing decoded Json of length %d", len(model.Data)) - for index, item := range model.Data { - // match by RG name & account ID - if item.Name == rgName.(string) && item.AccountID == uint64(d.Get("account_id").(int)) { - log.Debugf("utilityResgroupCheckPresence: match RG name %s / ID %d, account ID %d at index %d", - item.Name, item.ID, item.AccountID, index) - - req := rg.GetRequest{ - RGID: item.ID, - } - - apiResp, err := c.CloudBroker().RG().Get(ctx, req) - if err != nil { - return nil, err - } - - return apiResp, nil - } - } - - return nil, fmt.Errorf("Cannot find RG name %s owned by account ID %d", rgName, d.Get("account_id").(int)) + return rgData, nil } diff --git a/internal/service/cloudbroker/rg/utility_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/utility_rg_affinity_group_computes.go new file mode 100644 index 0000000..f47bc63 --- /dev/null +++ b/internal/service/cloudbroker/rg/utility_rg_affinity_group_computes.go @@ -0,0 +1,27 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupComputesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (rg.ListAffinityGroupCompute, error) { + c := m.(*controller.ControllerCfg) + req := rg.AffinityGroupComputesRequest{ + RGID: uint64(d.Get("rg_id").(int)), + AffinityGroup: d.Get("affinity_group").(string), + } + + log.Debugf("utilityRgAffinityGroupComputesCheckPresence: load affinity group computes") + res, err := c.CloudBroker().RG().AffinityGroupComputes(ctx, req) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/internal/service/cloudbroker/rg/utility_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_get.go new file mode 100644 index 0000000..6e3cb0d --- /dev/null +++ b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_get.go @@ -0,0 +1,26 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupsGetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]uint64, error) { + c := m.(*controller.ControllerCfg) + req := rg.AffinityGroupsGetRequest{ + RGID: uint64(d.Get("rg_id").(int)), + AffinityGroup: d.Get("affinity_group").(string), + } + + log.Debugf("utilityRgAffinityGroupsGetCheckPresence: load computes in the specified affinity group") + computes, err := c.CloudBroker().RG().AffinityGroupsGet(ctx, req) + if err != nil { + return nil, err + } + + return computes, nil +} diff --git a/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go new file mode 100644 index 
0000000..df71ad9 --- /dev/null +++ b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go @@ -0,0 +1 @@ +package rg \ No newline at end of file diff --git a/internal/service/cloudbroker/rg/utility_rg_audits.go b/internal/service/cloudbroker/rg/utility_rg_audits.go new file mode 100644 index 0000000..531dc11 --- /dev/null +++ b/internal/service/cloudbroker/rg/utility_rg_audits.go @@ -0,0 +1,25 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityRgAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (rg.ListAudits, error) { + c := m.(*controller.ControllerCfg) + req := rg.AuditsRequest{ + RGID: uint64(d.Get("rg_id").(int)), + } + + log.Debugf("utilityRgAuditsCheckPresence: load rg audits") + rgAudits, err := c.CloudBroker().RG().Audits(ctx, req) + if err != nil { + return nil, err + } + + return rgAudits, nil +} diff --git a/internal/service/cloudbroker/sep/api.go b/internal/service/cloudbroker/sep/api.go deleted file mode 100644 index 49067dd..0000000 --- a/internal/service/cloudbroker/sep/api.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -const sepAddConsumerNodesAPI = "/restmachine/cloudbroker/sep/addConsumerNodes" -const sepDelConsumerNodesAPI = "/restmachine/cloudbroker/sep/delConsumerNodes" -const sepAddProviderNodesAPI = "/restmachine/cloudbroker/sep/addProviderNodes" - -const sepConfigFieldEditAPI = "/restmachine/cloudbroker/sep/configFieldEdit" -const sepConfigInsertAPI = "/restmachine/cloudbroker/sep/configInsert" -const sepConfigValidateAPI = "/restmachine/cloudbroker/sep/configValidate" - -const sepConsumptionAPI = "/restmachine/cloudbroker/sep/consumption" - -const sepDecommissionAPI = "/restmachine/cloudbroker/sep/decommission" - -const sepEnableAPI = "/restmachine/cloudbroker/sep/enable" -const sepDisableAPI = "/restmachine/cloudbroker/sep/disable" - -const sepDiskListAPI = "/restmachine/cloudbroker/sep/diskList" - -const sepGetAPI = "/restmachine/cloudbroker/sep/get" -const sepGetConfigAPI = "/restmachine/cloudbroker/sep/getConfig" -const sepGetPoolAPI = "/restmachine/cloudbroker/sep/getPool" - -const sepCreateAPI = "/restmachine/cloudbroker/sep/create" -const sepDeleteAPI = "/restmachine/cloudbroker/sep/delete" -const sepListAPI = "/restmachine/cloudbroker/sep/list" - -const sepUpdateCapacityLimitAPI = "/restmachine/cloudbroker/sep/updateCapacityLimit" diff --git a/internal/service/cloudbroker/sep/data_source_sep.go b/internal/service/cloudbroker/sep/data_source_sep.go index b3cc897..e3b53e4 100644 --- a/internal/service/cloudbroker/sep/data_source_sep.go +++ 
b/internal/service/cloudbroker/sep/data_source_sep.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,8 +35,8 @@ package sep import ( "context" "encoding/json" + "strconv" - "github.com/google/uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" @@ -47,8 +48,6 @@ func dataSourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{ if err != nil { return diag.FromErr(err) } - id := uuid.New() - d.SetId(id.String()) d.Set("ckey", desSep.CKey) d.Set("meta", flattens.FlattenMeta(desSep.Meta)) @@ -61,11 +60,14 @@ func dataSourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{ d.Set("name", desSep.Name) d.Set("obj_status", desSep.ObjStatus) d.Set("provided_by", desSep.ProvidedBy) + d.Set("shared_with", desSep.SharedWith) d.Set("tech_status", desSep.TechStatus) d.Set("type", desSep.Type) data, _ := json.Marshal(desSep.Config) d.Set("config", string(data)) + d.SetId(strconv.Itoa(d.Get("sep_id").(int))) + return nil } @@ -88,7 +90,7 @@ func dataSourceSepCSchemaMake() map[string]*schema.Schema { }, }, "consumed_by": { - Type: schema.TypeList, + Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{ Type: schema.TypeInt, @@ -125,6 +127,13 @@ func dataSourceSepCSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, }, }, + "shared_with": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "tech_status": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/sep/data_source_sep_config.go 
b/internal/service/cloudbroker/sep/data_source_sep_config.go index 519c3c4..236ee81 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_config.go +++ b/internal/service/cloudbroker/sep/data_source_sep_config.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/sep/data_source_sep_consumption.go b/internal/service/cloudbroker/sep/data_source_sep_consumption.go index 8167626..ef1637c 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_consumption.go +++ b/internal/service/cloudbroker/sep/data_source_sep_consumption.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,11 +38,9 @@ import ( "github.com/google/uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) -// TODO: how to marshal byPool???? 
func dataSourceSepConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { sepCons, err := utilitySepConsumptionCheckPresence(ctx, d, m) if err != nil { @@ -52,43 +51,11 @@ func dataSourceSepConsumptionRead(ctx context.Context, d *schema.ResourceData, m d.Set("type", sepCons.Type) d.Set("total", flattenSepConsumption(sepCons.Total)) - // d.Set("by_pool", flattenSepConsumptionPools(sepCons.ByPool)) + d.Set("by_pool", flattenSepConsumptionPools(sepCons)) return nil } -func flattenSepConsumptionPools(mp map[string]SepConsumptionInd) []map[string]interface{} { - sh := make([]map[string]interface{}, 0) - for k, v := range mp { - temp := map[string]interface{}{ - "name": k, - "disk_count": v.DiskCount, - "disk_usage": v.DiskUsage, - "snapshot_count": v.SnapshotCount, - "snapshot_usage": v.SnapshotUsage, - "usage": v.Usage, - "usage_limit": v.UsageLimit, - } - sh = append(sh, temp) - } - return sh -} - -func flattenSepConsumption(sc sep.Total) []map[string]interface{} { - sh := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "capacity_limit": sc.CapacityLimit, - "disk_count": sc.DiskCount, - "disk_usage": sc.DiskUsage, - "snapshot_count": sc.SnapshotCount, - "snapshot_usage": sc.SnapshotUsage, - "usage": sc.Usage, - "usage_limit": sc.UsageLimit, - } - sh = append(sh, temp) - return sh -} - func dataSourceSepConsumptionSchemaMake() map[string]*schema.Schema { return map[string]*schema.Schema{ "sep_id": { @@ -97,8 +64,9 @@ func dataSourceSepConsumptionSchemaMake() map[string]*schema.Schema { Description: "sep id", }, "by_pool": { - Type: schema.TypeList, - Computed: true, + Type: schema.TypeList, + Computed: true, + Description: "consumption divided by pool", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -138,7 +106,6 @@ func dataSourceSepConsumptionSchemaMake() map[string]*schema.Schema { }, }, }, - Description: "consumption divided by pool", }, "total": { Type: schema.TypeList, diff 
--git a/internal/service/cloudbroker/sep/data_source_sep_disk_list.go b/internal/service/cloudbroker/sep/data_source_sep_disk_list.go index 6c681ea..1610bc4 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_disk_list.go +++ b/internal/service/cloudbroker/sep/data_source_sep_disk_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/sep/data_source_sep_list.go b/internal/service/cloudbroker/sep/data_source_sep_list.go index c7192a1..81c8e15 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_list.go +++ b/internal/service/cloudbroker/sep/data_source_sep_list.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -33,42 +34,13 @@ package sep import ( "context" - "encoding/json" "github.com/google/uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) -func flattenSepList(sl *sep.ListSEP) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, item := range sl.Data { - data, _ := json.Marshal(item.Config) - temp := map[string]interface{}{ - "ckey": item.CKey, - "meta": flattens.FlattenMeta(item.Meta), - "consumed_by": item.ConsumedBy, - "desc": item.Description, - "gid": item.GID, - "guid": item.GUID, - "sep_id": item.ID, - "milestones": item.Milestones, - "name": item.Name, - "obj_status": item.ObjStatus, - "provided_by": item.ProvidedBy, - "tech_status": item.TechStatus, - "type": item.Type, - "config": string(data), - } - - res = append(res, temp) - } - return res -} - func dataSourceSepListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { sepList, err := utilitySepListCheckPresence(ctx, d, m) if err != nil { @@ -77,12 +49,48 @@ func dataSourceSepListRead(ctx context.Context, d *schema.ResourceData, m interf id := uuid.New() d.SetId(id.String()) d.Set("items", flattenSepList(sepList)) + d.Set("entryCount", sepList.EntryCount) return nil } func dataSourceSepListSchemaMake() map[string]*schema.Schema { rets := map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "find by id", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "find by name", + }, + "gid": { + Type: schema.TypeInt, + Optional: true, + Description: "find by gid", + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: "find by sep type", + }, + "provided_by": { + Type: 
schema.TypeInt, + Optional: true, + Description: "find by provided physical node id", + }, + "tech_status": { + Type: schema.TypeString, + Optional: true, + Description: "find by techStatus", + }, + "consumed_by": { + Type: schema.TypeInt, + Optional: true, + Description: "find by consumed physical node id", + }, "page": { Type: schema.TypeInt, Optional: true, @@ -101,6 +109,11 @@ func dataSourceSepListSchemaMake() map[string]*schema.Schema { Schema: dataSourceSepShortSchemaMake(), }, }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entryCount", + }, } return rets @@ -120,7 +133,7 @@ func dataSourceSepShortSchemaMake() map[string]*schema.Schema { }, }, "consumed_by": { - Type: schema.TypeList, + Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{ Type: schema.TypeInt, @@ -161,6 +174,13 @@ func dataSourceSepShortSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, }, }, + "shared_with": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "tech_status": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/sep/data_source_sep_pool.go b/internal/service/cloudbroker/sep/data_source_sep_pool.go index 0dba6ce..f9d13c2 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_pool.go +++ b/internal/service/cloudbroker/sep/data_source_sep_pool.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -33,7 +34,6 @@ package sep import ( "context" - "encoding/json" "github.com/google/uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -48,9 +48,7 @@ func dataSourceSepPoolRead(ctx context.Context, d *schema.ResourceData, m interf } id := uuid.New() d.SetId(id.String()) - - data, _ := json.Marshal(sepPool) - d.Set("pool", string(data)) + d.Set("pool", flattenSepPool(sepPool)) return nil } @@ -68,8 +66,65 @@ func dataSourceSepPoolSchemaMake() map[string]*schema.Schema { Description: "pool name", }, "pool": { - Type: schema.TypeString, + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_account_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "access_res_group_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "pagecache_ratio": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "uris": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "usage_limit": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, }, } } diff --git a/internal/service/cloudbroker/sep/flattens.go b/internal/service/cloudbroker/sep/flattens.go new file mode 100644 index 0000000..3ecc192 --- /dev/null +++ b/internal/service/cloudbroker/sep/flattens.go @@ -0,0 +1,126 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "encoding/json" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" +) + +func flattenSepList(sl *sep.ListSEP) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, item := range sl.Data { + data, _ := json.Marshal(item.Config) + temp := map[string]interface{}{ + "ckey": item.CKey, + "meta": flattens.FlattenMeta(item.Meta), + "consumed_by": item.ConsumedBy, + "desc": item.Description, + "gid": item.GID, + "guid": item.GUID, + "sep_id": item.ID, + "milestones": item.Milestones, + "name": item.Name, + "obj_status": item.ObjStatus, + "provided_by": item.ProvidedBy, + "shared_with": item.SharedWith, + "tech_status": item.TechStatus, + "type": item.Type, + "config": string(data), + } + res = append(res, temp) + } + return res +} + +func flattenSepPool(rp *sep.RecordPool) []map[string]interface{} { + sh := 
make([]map[string]interface{}, 0) + res := map[string]interface{}{ + "access_account_ids": rp.AccessAccountIDs, + "access_res_group_ids": rp.AccessResGroupIDs, + "name": rp.Name, + "pagecache_ratio": rp.PageCacheRatio, + "reference_id": rp.ReferenceID, + "types": rp.Types, + "uris": flattenSepPoolUris(rp), + "usage_limit": rp.UsageLimit, + } + sh = append(sh, res) + return sh +} + +func flattenSepPoolUris(rp *sep.RecordPool) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, ur := range rp.URIs { + temp := map[string]interface{}{ + "ip": ur.IP, + "port": ur.Port, + } + res = append(res, temp) + } + return res +} + +func flattenSepConsumption(sc sep.Total) []map[string]interface{} { + sh := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "capacity_limit": sc.CapacityLimit, + "disk_count": sc.DiskCount, + "disk_usage": sc.DiskUsage, + "snapshot_count": sc.SnapshotCount, + "snapshot_usage": sc.SnapshotUsage, + "usage": sc.Usage, + "usage_limit": sc.UsageLimit, + } + sh = append(sh, temp) + return sh +} + +func flattenSepConsumptionPools(bp *sep.RecordConsumption) []map[string]interface{} { + sh := make([]map[string]interface{}, 0) + for key, value := range bp.ByPool { + temp := map[string]interface{}{ + "name": key, + "disk_count": value.DiskCount, + "disk_usage": value.DiskUsage, + "snapshot_count": value.SnapshotCount, + "snapshot_usage": value.SnapshotUsage, + "usage": value.Usage, + "usage_limit": value.UsageLimit, + } + sh = append(sh, temp) + } + return sh +} diff --git a/internal/service/cloudbroker/sep/models.go b/internal/service/cloudbroker/sep/models.go deleted file mode 100644 index 346aae2..0000000 --- a/internal/service/cloudbroker/sep/models.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
-Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -///Sep Models -type SepConsumptionInd struct { - DiskCount int `json:"disk_count"` - DiskUsage int `json:"disk_usage"` - SnapshotCount int `json:"snapshot_count"` - SnapshotUsage int `json:"snapshot_usage"` - Usage int `json:"usage"` - UsageLimit int `json:"usage_limit"` -} - -type SepConsumptionTotal struct { - CapacityLimit int `json:"capacity_limit"` - SepConsumptionInd -} - -type SepConsumption struct { - Total SepConsumptionTotal `json:"total"` - Type string `json:"type"` - ByPool map[string]SepConsumptionInd `json:"byPool"` -} - -type SepDiskList []int - -type Sep struct { - Ckey string `json:"_ckey"` - Meta []interface{} `json:"_meta"` - ConsumedBy []int `json:"consumedBy"` - Desc string `json:"desc"` - Gid int `json:"gid"` - Guid int `json:"guid"` - Id int `json:"id"` - Milestones int `json:"milestones"` - Name string `json:"name"` - ObjStatus string `json:"objStatus"` - ProvidedBy []int `json:"providedBy"` - TechStatus string `json:"techStatus"` - Type string 
`json:"type"` - Config SepConfig `json:"config"` -} - -type SepConfig map[string]interface{} - -type SepList []Sep -type SepPool map[string]interface{} diff --git a/internal/service/cloudbroker/sep/resource_check_input_values.go b/internal/service/cloudbroker/sep/resource_check_input_values.go new file mode 100644 index 0000000..9f2f43b --- /dev/null +++ b/internal/service/cloudbroker/sep/resource_check_input_values.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + gid := uint64(d.Get("gid").(int)) + req := grid.ListRequest{} + + gridList, err := c.CloudBroker().Grid().List(ctx, req) + if err != nil { + return false, err + } + + return len(gridList.FilterByID(gid).Data) != 0, nil +} + +func resourceSepConfigExists(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + log.Debugf("resourceSepConfigExists: called for sep id: %d", d.Get("sep_id").(int)) + + sepDesConfig, err := utilitySepConfigCheckPresence(ctx, d, m) + if sepDesConfig == nil { + if err != nil { + return false, err + } + return false, nil + } + + return true, nil +} diff --git a/internal/service/cloudbroker/sep/resource_sep.go b/internal/service/cloudbroker/sep/resource_sep.go index 1a445ef..3e4afba 100644 --- a/internal/service/cloudbroker/sep/resource_sep.go +++ b/internal/service/cloudbroker/sep/resource_sep.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -51,8 +52,16 @@ func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{ c := m.(*controller.ControllerCfg) req := sep.CreateRequest{} - req.Name = d.Get("name").(string) + haveGID, err := existGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceSepCreate: can't create Sep because GID %d is not allowed or does not exist", d.Get("gid").(int)) + } req.GID = uint64(d.Get("gid").(int)) + + req.Name = d.Get("name").(string) req.SEPType = d.Get("type").(string) if desc, ok := d.GetOk("desc"); ok { @@ -110,6 +119,7 @@ func resourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{}) d.Set("name", sep.Name) d.Set("obj_status", sep.ObjStatus) d.Set("provided_by", sep.ProvidedBy) + d.Set("shared_with", sep.SharedWith) d.Set("tech_status", sep.TechStatus) d.Set("type", sep.Type) data, _ := json.Marshal(sep.Config) @@ -148,6 +158,14 @@ func resourceSepUpdate(ctx context.Context, d *schema.ResourceData, m interface{ log.Debugf("resourceSepEdit: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) c := m.(*controller.ControllerCfg) + haveGID, err := existGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceSepUpdate: can't update Sep because GID %d is not allowed or does not exist", d.Get("gid").(int)) + } + if d.HasChange("decommission") { decommission := d.Get("decommission").(bool) if decommission { @@ -236,11 +254,7 @@ func resourceSepUpdate(ctx context.Context, d *schema.ResourceData, m interface{ } } - if diagnostics := resourceSepRead(ctx, d, m); diagnostics != nil { - return diagnostics - } - - return nil + return resourceSepRead(ctx, d, m) } func resourceSepChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { @@ -273,64 +287,45 @@ func resourceSepChangeEnabled(ctx context.Context, d *schema.ResourceData, m int func resourceSepUpdateNodes(ctx context.Context, d 
*schema.ResourceData, m interface{}) error { log.Debugf("resourceSepUpdateNodes: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) c := m.(*controller.ControllerCfg) - delReq := sep.DelConsumerNodesRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - } - - addReq := sep.AddConsumerNodesRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - } - - toDel := false - - t1, t2 := d.GetChange("consumed_by") - - consumedIds := make([]interface{}, 0) - if d1, d2 := t1.([]interface{}), t2.([]interface{}); len(d1) > len(d2) { - for _, n := range d2 { - if !findElInt(d1, n) { - consumedIds = append(consumedIds, n) - } - } - toDel = true - } else { - consumedIds = d.Get("consumed_by").([]interface{}) - } + oldSet, newSet := d.GetChange("consumed_by") - var consumerNIDs []uint64 - for _, consumedId := range consumedIds { - consumerNIDs = append(consumerNIDs, uint64(consumedId.(int))) + deletedConsumed := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + var consumerDelIds []uint64 + for _, deletedInterface := range deletedConsumed { + deletedItem := deletedInterface.(int) + consumerDelIds = append(consumerDelIds, uint64(deletedItem)) } - - if toDel { - delReq.ConsumerNIDs = consumerNIDs - - _, err := c.CloudBroker().SEP().DelConsumerNodes(ctx, delReq) + if len(consumerDelIds) != 0 { + reqDel := sep.DelConsumerNodesRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + ConsumerNIDs: consumerDelIds, + } + _, err := c.CloudBroker().SEP().DelConsumerNodes(ctx, reqDel) if err != nil { return err } - } else { - addReq.ConsumerNIDs = consumerNIDs + } - _, err := c.CloudBroker().SEP().AddConsumerNodes(ctx, addReq) + addedConsumed := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + var consumerAddIds []uint64 + for _, addedInterface := range addedConsumed { + AddedItem := addedInterface.(int) + consumerAddIds = append(consumerAddIds, uint64(AddedItem)) + } + if len(consumerAddIds) != 0 { + reqAdd := sep.AddConsumerNodesRequest{ + SEPID: 
uint64(d.Get("sep_id").(int)), + ConsumerNIDs: consumerAddIds, + } + _, err := c.CloudBroker().SEP().AddConsumerNodes(ctx, reqAdd) if err != nil { return err } } - return nil } -func findElInt(sl []interface{}, el interface{}) bool { - for _, e := range sl { - if e.(int) == el.(int) { - return true - } - } - return false -} - func resourceSepUpdateProviders(ctx context.Context, d *schema.ResourceData, m interface{}) error { log.Debugf("resourceSepUpdateProviders: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) c := m.(*controller.ControllerCfg) @@ -396,7 +391,7 @@ func resourceSepSchemaMake() map[string]*schema.Schema { }, }, "consumed_by": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Computed: true, Elem: &schema.Schema{ @@ -441,6 +436,14 @@ func resourceSepSchemaMake() map[string]*schema.Schema { }, Description: "list of provider nodes IDs", }, + "shared_with": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, "tech_status": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbroker/sep/resource_sep_config.go b/internal/service/cloudbroker/sep/resource_sep_config.go index fe4ea67..1dd62e2 100644 --- a/internal/service/cloudbroker/sep/resource_sep_config.go +++ b/internal/service/cloudbroker/sep/resource_sep_config.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -85,20 +85,6 @@ func resourceSepConfigDelete(ctx context.Context, d *schema.ResourceData, m inte return nil } -func resourceSepConfigExists(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - log.Debugf("resourceSepConfigExists: called for sep id: %d", d.Get("sep_id").(int)) - - sepDesConfig, err := utilitySepConfigCheckPresence(ctx, d, m) - if sepDesConfig == nil { - if err != nil { - return false, err - } - return false, nil - } - - return true, nil -} - func resourceSepConfigUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceSepConfigEdit: called for sep id: %d", d.Get("sep_id").(int)) c := m.(*controller.ControllerCfg) diff --git a/internal/service/cloudbroker/sep/utility_sep.go b/internal/service/cloudbroker/sep/utility_sep.go index d9828dc..aeae4e8 100644 --- a/internal/service/cloudbroker/sep/utility_sep.go +++ b/internal/service/cloudbroker/sep/utility_sep.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -46,11 +47,16 @@ func utilitySepCheckPresence(ctx context.Context, d *schema.ResourceData, m inte c := m.(*controller.ControllerCfg) req := sep.GetRequest{} - if d.Get("sep_id").(int) == 0 { + if d.Get("sep_id") != nil { + if d.Get("sep_id").(int) == 0 { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.SEPID = id + } else { + req.SEPID = uint64(d.Get("sep_id").(int)) + } + } else { id, _ := strconv.ParseUint(d.Id(), 10, 64) req.SEPID = id - } else { - req.SEPID = uint64(d.Get("sep_id").(int)) } log.Debugf("utilitySepCheckPresence: load sep") diff --git a/internal/service/cloudbroker/sep/utility_sep_config.go b/internal/service/cloudbroker/sep/utility_sep_config.go index 5081371..2e3a91e 100644 --- a/internal/service/cloudbroker/sep/utility_sep_config.go +++ b/internal/service/cloudbroker/sep/utility_sep_config.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/sep/utility_sep_consumption.go b/internal/service/cloudbroker/sep/utility_sep_consumption.go index 36f089c..ef8bf5d 100644 --- a/internal/service/cloudbroker/sep/utility_sep_consumption.go +++ b/internal/service/cloudbroker/sep/utility_sep_consumption.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/sep/utility_sep_disk_list.go b/internal/service/cloudbroker/sep/utility_sep_disk_list.go index 3b328f8..9f5446a 100644 --- a/internal/service/cloudbroker/sep/utility_sep_disk_list.go +++ b/internal/service/cloudbroker/sep/utility_sep_disk_list.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/sep/utility_sep_list.go b/internal/service/cloudbroker/sep/utility_sep_list.go index cb43c04..d6acd93 100644 --- a/internal/service/cloudbroker/sep/utility_sep_list.go +++ b/internal/service/cloudbroker/sep/utility_sep_list.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -45,6 +46,27 @@ func utilitySepListCheckPresence(ctx context.Context, d *schema.ResourceData, m c := m.(*controller.ControllerCfg) req := sep.ListRequest{} + if by_id, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(by_id.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = string(name.(string)) + } + if gid, ok := d.GetOk("gid"); ok { + req.GID = uint64(gid.(int)) + } + if type_, ok := d.GetOk("type"); ok { + req.Type = string(type_.(string)) + } + if provided_by, ok := d.GetOk("provided_by"); ok { + req.ProvidedBy = uint64(provided_by.(int)) + } + if tech_status, ok := d.GetOk("tech_status"); ok { + req.TechStatus = string(tech_status.(string)) + } + if consumed_by, ok := d.GetOk("consumed_by"); ok { + req.ConsumedBy = uint64(consumed_by.(int)) + } if page, ok := d.GetOk("page"); ok { req.Page = uint64(page.(int)) } diff --git a/internal/service/cloudbroker/sep/utility_sep_pool.go b/internal/service/cloudbroker/sep/utility_sep_pool.go index ff80560..8250ac8 100644 --- a/internal/service/cloudbroker/sep/utility_sep_pool.go +++ b/internal/service/cloudbroker/sep/utility_sep_pool.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, diff --git a/internal/service/cloudbroker/stack/data_source_stack_list.go b/internal/service/cloudbroker/stack/data_source_stack_list.go new file mode 100644 index 0000000..c7efdf9 --- /dev/null +++ b/internal/service/cloudbroker/stack/data_source_stack_list.go @@ -0,0 +1,119 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package stack + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceStackListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + stackList, err := utilityStackListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenStacksList(stackList)) + d.Set("entry_count", stackList.EntryCount) + + return nil +} + +func dataSourceStaksListSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "by_id": { + Type: schema.TypeInt, + Optional: true, + Description: "by_id", + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: "name", + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: "type", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "type", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "page size", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: dataSourceStackSchemaMake(), + }, + Description: "items of stacks list", + }, + "entry_count": { + Type: schema.TypeInt, + Computed: true, + Description: "entry_count", + }, + } +} + +func DataSourceStacksList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceStackListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceStaksListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/stack/data_sourse_stack.go 
b/internal/service/cloudbroker/stack/data_sourse_stack.go new file mode 100644 index 0000000..af52c13 --- /dev/null +++ b/internal/service/cloudbroker/stack/data_sourse_stack.go @@ -0,0 +1,319 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package stack + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceStackRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + stack, err := utilityStackCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + flattenStack(d, stack) + d.SetId(strconv.Itoa(d.Get("stack_id").(int))) + + return nil +} + +func dataSourceStackSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "stack_id": { + Type: schema.TypeInt, + Required: true, + Description: "stack_id", + }, + "ckey": { + Type: schema.TypeString, + Computed: true, + Description: "ckey", + }, + "meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "meta", + }, + "api_url": { + Type: schema.TypeString, + Computed: true, + Description: "api_url", + }, + "api_key": { + Type: schema.TypeString, + Computed: true, + Description: "api_key", + }, + "app_id": { + Type: schema.TypeString, + Computed: true, + Description: "api_id", + }, + "cpu_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + Description: "cpu_allocation_ratio", + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: "description", + }, + "descr": { + Type: schema.TypeString, + Computed: true, + Description: "descr", + }, + "drivers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "drivers", + }, + "eco": { + Type: schema.TypeString, + Computed: true, + Description: "eco", + }, + "error": { + Type: schema.TypeInt, + Computed: true, + Description: "error", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, 
+ Description: "gid", + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + Description: "guid", + }, + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Description: "images", + }, + "login": { + Type: schema.TypeString, + Computed: true, + Description: "login", + }, + "mem_allocation_ratio": { + Type: schema.TypeFloat, + Computed: true, + Description: "mem_allocation_ratio", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "name", + }, + "packages": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: packagesSchemaMake(), + }, + }, + "passwd": { + Type: schema.TypeString, + Computed: true, + Description: "password", + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + Description: "reference_id", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "status", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "type", + }, + } +} + +func packagesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "libvirt_bin": { + Type: schema.TypeList, + Computed: true, + Description: "libvirt_bin", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + "lvm2_lockd": { + Type: schema.TypeList, + Computed: true, + Description: "lvm2_lockd", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + "openvswitch_common": { + Type: schema.TypeList, + Computed: true, + Description: "openvswitch_common", + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + "openvswitch_switch": { + Type: schema.TypeList, + Computed: true, + Description: "openvswitch_switch", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + "qemu_system_x86": { + Type: schema.TypeList, + Computed: true, + Description: "qemu_system_x86", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + "sanlock": { + Type: schema.TypeList, + Computed: true, + Description: "sanlock", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installed_size": { + Type: schema.TypeString, + Computed: true, + Description: "installed_size", + }, + "ver": { + Type: schema.TypeString, + Computed: true, + Description: "ver", + }, + }, + }, + }, + } + return res +} + +func DataSourceStack() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceStackRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceStackSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/stack/flattens.go b/internal/service/cloudbroker/stack/flattens.go new file mode 100644 index 0000000..634e9c4 --- /dev/null +++ b/internal/service/cloudbroker/stack/flattens.go @@ -0,0 +1,190 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package stack + +import ( + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" +) + +func flattenStack(d *schema.ResourceData, st *stack.InfoStack) { + d.Set("ckey", st.Ckey) + d.Set("meta", flattens.FlattenMeta(st.Meta)) + d.Set("api_url", st.APIURL) + d.Set("api_key", st.Apikey) + d.Set("app_id", st.AppID) + d.Set("cpu_allocation_ratio", st.CPUAllocationRatio) + d.Set("description", st.Description) + d.Set("descr", st.Descr) + d.Set("drivers", st.Drivers) + d.Set("eco", flattenEco(st.Eco)) + d.Set("error", st.Error) + d.Set("gid", st.GID) + d.Set("guid", st.GUID) + d.Set("stack_id", st.ID) + d.Set("images", st.Images) + d.Set("login", st.Login) + d.Set("mem_allocation_ratio", st.MemAllocationRatio) + d.Set("name", st.Name) + d.Set("packages", flattenPackages(st.Packages)) + d.Set("passwd", 
st.Password) + d.Set("reference_id", st.ReferenceID) + d.Set("status", st.Status) + d.Set("type", st.Type) +} + +func flattenPackages(pg stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "libvirt_bin": flattenLibvirtBin (pg), + "lvm2_lockd": flattenLvm2Lockd (pg), + "openvswitch_common": flattenOpenvswitchCommon (pg), + "openvswitch_switch": flattenOpenvswitchSwitch (pg), + "qemu_system_x86": flattenQemuSystemX86 (pg), + "sanlock": flattenSanlock (pg), + } + res = append(res, temp) + return res +} + +func flattenLibvirtBin(lb stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": lb.LibvirtBin.InstalledSize, + "ver": lb.LibvirtBin.Ver, + } + res = append(res, temp) + return res +} + +func flattenLvm2Lockd(ll stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": ll.Lvm2Lockd.InstalledSize, + "ver": ll.Lvm2Lockd.Ver, + } + res = append(res, temp) + return res +} + +func flattenOpenvswitchCommon(oc stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": oc.OpenvswitchCommon.InstalledSize, + "ver": oc.OpenvswitchCommon.Ver, + } + res = append(res, temp) + return res +} + +func flattenOpenvswitchSwitch(os stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": os.OpenvswitchSwitch.InstalledSize, + "ver": os.OpenvswitchSwitch.Ver, + } + res = append(res, temp) + return res +} + +func flattenQemuSystemX86(qs stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": qs.QemuSystemX86.InstalledSize, + "ver": qs.QemuSystemX86.Ver, + } + res = append(res, temp) + return res +} + +func 
flattenSanlock(sl stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": sl.Sanlock.InstalledSize, + "ver": sl.Sanlock.Ver, + } + res = append(res, temp) + return res +} + + +func flattenEco(m interface{}) string { + switch d := m.(type) { + case string: + return d + case int: + return strconv.Itoa(d) + case int64: + return strconv.FormatInt(d, 10) + case float64: + return strconv.FormatInt(int64(d), 10) + default: + return "" + } +} + +func flattenStacksList(sl *stack.ListStacks) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(sl.Data)) + for _, item := range sl.Data { + temp := map[string]interface{}{ + "ckey": item.Ckey, + "meta": flattens.FlattenMeta(item.Meta), + "api_url": item.APIURL, + "api_key": item.Apikey, + "app_id": item.AppID, + "cpu_allocation_ratio": item.CPUAllocationRatio, + "description": item.Description, + "descr": item.Descr, + "drivers": item.Drivers, + "eco": flattenEco(item.Eco), + "error": item.Error, + "gid": item.GID, + "guid": item.GUID, + "stack_id": item.ID, + "images": item.Images, + "login": item.Login, + "mem_allocation_ratio": item.MemAllocationRatio, + "name": item.Name, + "packages": flattenPackages(item.Packages), + "passwd": item.Password, + "reference_id": item.ReferenceID, + "status": item.Status, + "type": item.Type, + } + res = append(res, temp) + } + return res +} diff --git a/internal/service/cloudbroker/stack/utility_stack.go b/internal/service/cloudbroker/stack/utility_stack.go new file mode 100644 index 0000000..efeec61 --- /dev/null +++ b/internal/service/cloudbroker/stack/utility_stack.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package stack + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityStackCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.InfoStack, error) { + c := m.(*controller.ControllerCfg) + req := stack.GetRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.StackId = id + } else { + req.StackId = uint64(d.Get("stack_id").(int)) + } + + log.Debugf("utilityStackListCheckPresence: load stack list") + stackInfo, err := c.CloudBroker().Stack().Get(ctx, req) + if err != nil { + return nil, err + } + + return stackInfo, nil +} diff --git a/internal/service/cloudbroker/stack/utility_stack_list.go b/internal/service/cloudbroker/stack/utility_stack_list.go new file mode 100644 index 0000000..50294d8 --- /dev/null +++ b/internal/service/cloudbroker/stack/utility_stack_list.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. 
All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package stack + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityStackListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.ListStacks, error) { + c := m.(*controller.ControllerCfg) + req := stack.ListRequest{} + + if ByID, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(ByID.(int)) + } + if Name, ok := d.GetOk("name"); ok { + req.Name = string(Name.(string)) + } + if Type, ok := d.GetOk("type"); ok { + req.Type = string(Type.(string)) + } + if Status, ok := d.GetOk("status"); ok { + req.Status = string(Status.(string)) + } + if Page, ok := d.GetOk("page"); ok { + req.Page = uint64(Page.(int)) + } + if Size, ok := d.GetOk("size"); ok { + req.Size = uint64(Size.(int)) + } + + 
log.Debugf("utilityStackListCheckPresence: load stack list") + stackList, err := c.CloudBroker().Stack().List(ctx, req) + if err != nil { + return nil, err + } + + return stackList, nil +} diff --git a/samples/cloudapi/resource_k8s_cp/initconfig.tftpl b/samples/cloudapi/resource_k8s_cp/initconfig.tftpl new file mode 100644 index 0000000..06df7c0 --- /dev/null +++ b/samples/cloudapi/resource_k8s_cp/initconfig.tftpl @@ -0,0 +1,9 @@ +--- +users: +- groups: users, wheel + name: user + plain_text_passwd: examplePassword + primary_group: user + ssh_authorized_keys: + - ssh-rsa EXAMPLE%id_rsa.pub + sudo: ALL=(ALL) NOPASSWD:ALL diff --git a/samples/cloudapi/resource_k8s_cp/main.tf b/samples/cloudapi/resource_k8s_cp/main.tf index 24295a6..f206b40 100644 --- a/samples/cloudapi/resource_k8s_cp/main.tf +++ b/samples/cloudapi/resource_k8s_cp/main.tf @@ -66,10 +66,15 @@ resource "decort_k8s_cp" "cp" { # string desc = "" - # ID внешней сети - # Опциональный параметр - # id - extnet_id = 13 + # id extnet + #опциональный параметр + #тип - число + extnet_id = 0 + + # id vins + # опциональный параметр + # тип - число + vins_id = 1234 # Storage Endpoint ID # Опциональный параметр @@ -90,6 +95,59 @@ resource "decort_k8s_cp" "cp" { # Опциональный параметр # bool with_lb = true + + # позволяет создать схему отказоустройчивой LB + # опциональный параметр + # тип - булев тип + ha_mode = true + + # дополнительные SAN (Subject Alternative Names) для использования в процессе автоматического выписывания сертификата Кластера Kubernetes; + # возможность взаимодействовать с кластером по FQDN + # параметр получает список строк – IP-адреса и/или DNS (по формату RFC 1123 c поддержкой wildcard) + # опциональный параметр + # тип - массив строк + additional_sans = ["192.168.201.0","192.168.201.1"] + + # используется для определения настроек и действий, которые должны быть выполнены перед запуском любого другого компонента в кластере + # это позволяет вам настраивать такие вещи, как регистрация node, 
настройка network и другие задачи инициализации + # опциональный параметр + # тип - строка + init_config = "{JSON string}" + + # используется для определения глобальных настроек и конфигураций для всего кластера + # он включает в себя такие параметры, как имя кластера, настройки DNS, методы аутентификации и другие конфигурации в масштабах кластера + # опциональный параметр + # тип - строка + cluster_config = "{JSON string}" + + # используется для настройки поведения и параметров Kubelet, который является агентом primary node, запускаемым на каждом node кластера + # он включает в себя такие параметры, как IP-адрес node, распределение ресурсов, политики удаления модулей и другие конфигурации, специфичные для Kubelet + # опциональный параметр + # тип - строка + kubelet_config = "{JSON string}" + + # используется для настройки поведения и параметров присоединения node к кластеру + # он включает в себя такие параметры, как режим прокси-сервера, диапазоны IP-адресов кластера и другие конфигурации, специфичные для Kube-proxy + # опциональный параметр + # тип - строка + kube_proxy_config = "{JSON string}" + + # используется для настройки поведения и параметров присоединения node к кластеру + # он включает в себя такие параметры, как cluster's control plane endpoint, токен и ключ сертификата + # опциональный параметр + # тип - строка + join_config = "{JSON string}" + + # при создании кластре использовать подключение только к сети ExtNet + # опциональный параметр + # тип - булев тип + extnet_only = true + + # добавить ssl-сертификат в формате x509 pem + # необязательный параметр + # тип - файл + oidc_cert = file("ca.crt") + } output "cp_out" { diff --git a/samples/cloudbroker/data_account/main.tf b/samples/cloudbroker/data_account/main.tf new file mode 100644 index 0000000..17759f5 --- /dev/null +++ b/samples/cloudbroker/data_account/main.tf @@ -0,0 +1,39 @@ +/* +Пример использования +Получение информации об аккаунте + +*/ +#Расскомментируйте этот код, +#и внесите 
необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_cb_account" "acc" { + #id аккаунта + #обязательный параметр + #тип - число + account_id = 11111 + +} + +output "test" { + value = data.decort_cb_account.acc +} diff --git a/samples/cloudbroker/data_account_audits_list/main.tf b/samples/cloudbroker/data_account_audits_list/main.tf index 150deb8..af918f4 100644 --- a/samples/cloudbroker/data_account_audits_list/main.tf +++ b/samples/cloudbroker/data_account_audits_list/main.tf @@ -27,7 +27,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_audits_list" "aal" { +data "decort_cb_account_audits_list" "aal" { #id аккаунта #обязательный параметр #тип - число @@ -36,5 +36,5 @@ data "decort_account_audits_list" "aal" { } output "test" { - value = data.decort_account_audits_list.aal + value = data.decort_cb_account_audits_list.aal } diff --git a/samples/cloudbroker/data_account_computes_list/main.tf b/samples/cloudbroker/data_account_computes_list/main.tf index b099eb2..bc98058 100644 --- a/samples/cloudbroker/data_account_computes_list/main.tf +++ b/samples/cloudbroker/data_account_computes_list/main.tf @@ -26,14 +26,65 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_computes_list" "acl" { +data "decort_cb_account_computes_list" "acl" { #id аккаунта #обязательный параметр #тип - число account_id = 1111 + #фильтр по id compute + #опциональный параметр + #тип - число + #compute_id = 100 + + #фильтр по имени compute + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по имени 
ресурсной группы + #опциональный параметр + #тип - строка + #rg_name = "test" + + #фильтр по id ресурсной группы + #опциональный параметр + #тип - число + #rg_id = 100 + + #фильтр по техническому статусу + #опциональный параметр + #тип - строка + #tech_status = "STARTED" + + #фильтр по ip address + #опциональный параметр + #тип - строка + #ip_address = "1.1.1.1.1" + + #фильтр по имени внешней сети + #опциональный параметр + #тип - строка + #extnet_name = "test" + + #фильтр по id внешней сети + #опциональный параметр + #тип - число + #extnet_id = 100 + + #номер страницы для отображения + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #page = 1 + + #размер страницы + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #size = 1 } output "test" { - value = data.decort_account_computes_list.acl + value = data.decort_cb_account_computes_list.acl } diff --git a/samples/cloudbroker/data_account_deleted_list/main.tf b/samples/cloudbroker/data_account_deleted_list/main.tf index 9a5f843..57c66be 100644 --- a/samples/cloudbroker/data_account_deleted_list/main.tf +++ b/samples/cloudbroker/data_account_deleted_list/main.tf @@ -26,7 +26,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_deleted_list" "adl" { +data "decort_cb_account_deleted_list" "adl" { #номер страницы для отображения #опциональный параметр #тип - число @@ -38,8 +38,24 @@ data "decort_account_deleted_list" "adl" { #тип - число #если не задан - выводятся все доступные данные #size = 3 + + #фильтр по id аккаунта + #опциональный параметр + #тип - число + #by_id = 100 + + #фильтр по имени аккаунта + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по ACL + #опциональный параметр + #тип - строка + #acl = "test" + } output "test" { - value = data.decort_account_deleted_list.adl + value = data.decort_cb_account_deleted_list.adl } diff --git a/samples/cloudbroker/data_account_disks_list/main.tf 
b/samples/cloudbroker/data_account_disks_list/main.tf index f795e9b..9007390 100644 --- a/samples/cloudbroker/data_account_disks_list/main.tf +++ b/samples/cloudbroker/data_account_disks_list/main.tf @@ -26,14 +26,44 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_disks_list" "adl" { +data "decort_cb_account_disks_list" "adl" { #id аккаунта #обязательный параметр #тип - число account_id = 11111 + #фильтр по id диска + #опциональный параметр + #тип - число + #disk_id = 100 + + #фильтр по имени диска + #опциональный параметр + #тип - строка + #name = "data_disk" + + #фильтр по максимальному размеру диска + #опциональный параметр + #тип - число + #disk_max_size = 100 + + #тип диска + #опциональный параметр + #тип - строка + #возможные типы: "b" - boot_disk, "d" - data_disk + #type = "d" + + #кол-во страниц для вывода + #опицональный параметр + #тип - число + #page = 1 + + #размер страницы + #опицональный параметр + #тип - число + #size = 1 } output "test" { - value = data.decort_account_disks_list.adl + value = data.decort_cb_account_disks_list.adl } diff --git a/samples/cloudbroker/data_account_flipgroups_list/main.tf b/samples/cloudbroker/data_account_flipgroups_list/main.tf index 01fd2d8..9b570d2 100644 --- a/samples/cloudbroker/data_account_flipgroups_list/main.tf +++ b/samples/cloudbroker/data_account_flipgroups_list/main.tf @@ -26,13 +26,54 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_flipgroups_list" "afgl" { - #id аккаунта +data "decort_cb_account_flipgroups_list" "afgl" { + #id аккаунта #обязательный параметр #тип - число account_id = 1111 + + #фильтр по имени flipgroup + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по id vins + #опциональный параметр + #тип - число + #vins_id = 100 + + #фильтр по имени vins + #опциональный параметр + #тип - строка + #vins_name = "test" + + #фильтр по id extnet + #опциональный параметр + #тип - число + #extnet_id = 100 + + #фильтр по IP + 
#опциональный параметр + #тип - строка + #by_ip = "1.1.1.1.1" + + #фильтр по id flipgroup + #опциональный параметр + #тип - число + #flipgroup_id = 100 + + #кол-во страниц для вывода + #опицональный параметр + #тип - число + #page = 1 + + #размер страницы + #опицональный параметр + #тип - число + #size = 1 + } output "test" { - value = data.decort_account_flipgroups_list.afgl + value = data.decort_cb_account_flipgroups_list.afgl } diff --git a/samples/cloudbroker/data_account_get_resource_consumption/main.tf b/samples/cloudbroker/data_account_get_resource_consumption/main.tf new file mode 100644 index 0000000..44b2560 --- /dev/null +++ b/samples/cloudbroker/data_account_get_resource_consumption/main.tf @@ -0,0 +1,37 @@ +/* +Получение списка текущего потребления ресурсов аккаунта +*/ + +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://alpha.dev.decs.online" + #oauth2_url = + oauth2_url = "https://sso-alpha.dev.decs.online" + allow_unverified_ssl = true +} + +data "decort_cb_account_resource_consumption_get" "rc_get" { + #id аккаунта + #обязательный параметр + #тип - число + account_id = 111 +} + +output "test" { + value = data.decort_cb_account_resource_consumption_get.rc_get +} + diff --git a/samples/cloudbroker/data_account_list/main.tf b/samples/cloudbroker/data_account_list/main.tf index 7291312..26b82e9 100644 --- a/samples/cloudbroker/data_account_list/main.tf +++ b/samples/cloudbroker/data_account_list/main.tf @@ -25,7 +25,27 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_list" "al" { +data "decort_cb_account_list" "al" { + #фильтр по id аккаунта + #опциональный параметр + #тип - число + #by_id = 100 + + 
#фильтр по имени аккаунта + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по ACL + #опциональный параметр + #тип - строка + #acl = "test" + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "ENABLED" + #номер страницы для отображения #опциональный параметр #тип - число @@ -40,5 +60,5 @@ data "decort_account_list" "al" { } output "test" { - value = data.decort_account_list.al + value = data.decort_cb_account_list.al } diff --git a/samples/cloudbroker/data_account_resource_consumption_list/main.tf b/samples/cloudbroker/data_account_resource_consumption_list/main.tf new file mode 100644 index 0000000..d34a93c --- /dev/null +++ b/samples/cloudbroker/data_account_resource_consumption_list/main.tf @@ -0,0 +1,32 @@ +/* +Получение списка текущего потребления ресурсов +*/ + +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://alpha.dev.decs.online" + #oauth2_url = + oauth2_url = "https://sso-alpha.dev.decs.online" + allow_unverified_ssl = true +} + +data "decort_cb_account_resource_consumption_list" "rc_list" { +} + +output "test" { + value = data.decort_cb_account_resource_consumption_list.rc_list +} diff --git a/samples/cloudbroker/data_account_rg_list/main.tf b/samples/cloudbroker/data_account_rg_list/main.tf index dae55f4..3824e28 100644 --- a/samples/cloudbroker/data_account_rg_list/main.tf +++ b/samples/cloudbroker/data_account_rg_list/main.tf @@ -25,13 +25,50 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_rg_list" "argl" { - #id аккаунта +data "decort_cb_account_rg_list" "argl" { + #id аккаунта #обязательный параметр #тип - число account_id = 66666 + + #номер страницы для 
отображения + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #page = 2 + + #размер страницы + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #size = 3 + + #фильтр по id ресурсной группы + #опциональный параметр + #тип - число + #rg_id = 11111 + + #фильтр по имени ресурсной группы + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по id vins + #опциональный параметр + #тип - число + #vins_id = 100 + + #фильтр по id compute + #опциональный параметр + #тип - число + #vm_id = 100 + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "CREATED" } output "test" { - value = data.decort_account_rg_list.argl + value = data.decort_cb_account_rg_list.argl } diff --git a/samples/cloudbroker/data_account_vins_list/main.tf b/samples/cloudbroker/data_account_vins_list/main.tf index 66c336d..46a51d5 100644 --- a/samples/cloudbroker/data_account_vins_list/main.tf +++ b/samples/cloudbroker/data_account_vins_list/main.tf @@ -26,14 +26,47 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_account_vins_list" "avl" { - #id аккаунта +data "decort_cb_account_vins_list" "avl" { + #id аккаунта #обязательный параметр #тип - число account_id = 22222 + #фильтр по id vins + #опциональный параметр + #тип - число + #vins_id = 100 + + #фильтр по имени vins + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по id ресурсной группы + #опциональный параметр + #тип - число + #rg_id = 11111 + + #фильтр по IP внешней сети + #опциональный параметр + #тип - строка + #ext_ip = "test" + + #номер страницы для отображения + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #page = 2 + + #размер страницы + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + #size = 3 + + } output "test" { - value = data.decort_account_vins_list.avl + value = 
data.decort_cb_account_vins_list.avl } diff --git a/samples/cloudbroker/data_extnet/main.tf b/samples/cloudbroker/data_extnet/main.tf index 400c143..7d04369 100644 --- a/samples/cloudbroker/data_extnet/main.tf +++ b/samples/cloudbroker/data_extnet/main.tf @@ -1,6 +1,6 @@ /* Пример использования -Получение списка доступных дисков +Получение информации о сети */ #Расскомментируйте этот код, #и внесите необходимые правки в версию и путь, @@ -30,7 +30,7 @@ data "decort_cb_extnet" "ex" { # ID сети # Обязательный параметр # Тип - число - net_id = 13 + extnet_id = 13 } output "ex_out" { diff --git a/samples/cloudbroker/data_extnet_default/main.tf b/samples/cloudbroker/data_extnet_default/main.tf index 281ff03..a0bb01b 100644 --- a/samples/cloudbroker/data_extnet_default/main.tf +++ b/samples/cloudbroker/data_extnet_default/main.tf @@ -31,5 +31,5 @@ data "decort_cb_extnet_default" "ed" { } output "default_net_id_out" { - value = data.decort_cb_extnet_default.ed.net_id + value = data.decort_cb_extnet_default.ed } diff --git a/samples/cloudbroker/data_extnet_list/main.tf b/samples/cloudbroker/data_extnet_list/main.tf index 9ac0c2e..aa9c3cd 100644 --- a/samples/cloudbroker/data_extnet_list/main.tf +++ b/samples/cloudbroker/data_extnet_list/main.tf @@ -1,6 +1,6 @@ /* Пример использования -Получение списка доступных дисков +Получение списка сетей */ #Расскомментируйте этот код, #и внесите необходимые правки в версию и путь, @@ -27,15 +27,50 @@ provider "decort" { } data "decort_cb_extnet_list" "ex_list" { - # Фильтр по имени - # Опциональный параметр - # Тип - строка - name = "test_name" - - # Фильтр по статусу - # Опциональный параметр - # Тип - строка - status = "ENABLED" + #id аккаунта для фильтрации результата + #необязательный параметр + #тип - число + #account_id = 1111111 + + #фильтр по id внешней сети + #опциональный параметр + #тип - число + #by_id = 100 + + #фильтр по имени внешней сети + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по IP 
внешней сети + #опциональный параметр + #тип - строка + #network = "test" + + #фильтр по id vlan + #опциональный параметр + #тип - число + #vlan_id = 100 + + #фильтр по id vnfDevices + #опциональный параметр + #тип - число + #vnfdev_id = 100 + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "ENABLED" + + #кол-во страниц для вывода + #опицональный параметр + #тип - число + #page = 1 + + #размер страницы + #опицональный параметр + #тип - число + #size = 1 } output "ex_out" { diff --git a/samples/cloudbroker/data_extnet_static_route/main.tf b/samples/cloudbroker/data_extnet_static_route/main.tf new file mode 100644 index 0000000..b929a5d --- /dev/null +++ b/samples/cloudbroker/data_extnet_static_route/main.tf @@ -0,0 +1,44 @@ +/* +Пример использования +Получение информации о static routes по id в данном extnet + +*/ + +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://mr4.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_cb_extnet_static_route" "route" { + #обязательный параметр + #id extnet в котором добавлены routes + #тип - число + extnet_id = 1111 + + #обязательный параметр + #id route + #тип - число + route_id = 1 +} + +output "route" { + value = data.decort_cb_extnet_static_route.route +} diff --git a/samples/cloudbroker/data_extnet_static_route_list/main.tf b/samples/cloudbroker/data_extnet_static_route_list/main.tf new file mode 100644 index 0000000..7e67aee --- /dev/null +++ b/samples/cloudbroker/data_extnet_static_route_list/main.tf @@ -0,0 +1,39 @@ +/* +Пример использования +Получение списка static routes в данном 
extnet +*/ + +/* +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://mr4.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_cb_extnet_static_route_list" "list" { + #обязательный параметр + #id extnet в котором добавлены routes + #тип - число + extnet_id = 1111 +} + +output "list" { + value = data.decort_cb_extnet_static_route_list.list +} diff --git a/samples/cloudbroker/data_pcidevice/main.tf b/samples/cloudbroker/data_pcidevice/main.tf index dcf3cee..ab56434 100644 --- a/samples/cloudbroker/data_pcidevice/main.tf +++ b/samples/cloudbroker/data_pcidevice/main.tf @@ -25,9 +25,10 @@ provider "decort" { #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true -} + } + -data "decort_pcidevice" "pd" { +data "decort_cb_pcidevice" "pd" { #id устройства #обязательный параметр #тип - число @@ -35,5 +36,5 @@ data "decort_pcidevice" "pd" { } output "test" { - value = data.decort_pcidevice.pd + value = data.decort_cb_pcidevice.pd } diff --git a/samples/cloudbroker/data_pcidevice_list/main.tf b/samples/cloudbroker/data_pcidevice_list/main.tf index b649c98..ce4e000 100644 --- a/samples/cloudbroker/data_pcidevice_list/main.tf +++ b/samples/cloudbroker/data_pcidevice_list/main.tf @@ -25,10 +25,10 @@ provider "decort" { #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true -} + } -data "decort_pcidevice_list" "pdl" {} +data "decort_cb_pcidevice_list" "pdl" {} output "test" { - value = data.decort_pcidevice_list.pdl.items + value = data.decort_cb_pcidevice_list.pdl.items } diff --git 
a/samples/cloudbroker/resource_account/main.tf b/samples/cloudbroker/resource_account/main.tf new file mode 100644 index 0000000..21874c9 --- /dev/null +++ b/samples/cloudbroker/resource_account/main.tf @@ -0,0 +1,161 @@ +/* +Пример использования +Ресурса account +Ресурс позволяет: +1. Создавать аккаунт +2. Редактировать аккаунт +3. Удалять аккаунт + +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +resource "decort_cb_account" "acc" { + #имя аккаунта + #обязательный параметр + #тип - строка + #используется при создании и редактировании аккаунта + account_name = "new_my_account" + + #имя пользователя - создателя аккаунта + #обязательный параметр + #тип - строка + username = "username@decs3o" + + #доступность аккаунта + #необязательный параметр + #тип - булев тип + #может применяться при создании аккаунта и редактировании аккаунта + enable = true + + #электронная почта, на которую будет отправлена информация о доступе + #необязательный параметр + #тип - строка + #применяется при создании аккаунта + emailaddress = "fff@fff.ff" + + #отправлять ли на электронную почту письмо о доступе + #необязательный параметр + #тип - булев тип + #применяется при создании аккаунта и редактировании аккаунта + send_access_emails = true + + #имена пулов + #необязательный параметр + #тип - список строк + #применяется при создании аккаунта и редактировании аккаунта + uniq_pools = ["sep1_poolName1", "sep2_poolName2"] + + #ограничение используемых ресурсов + #необязательный параметр + #тип - объект + #используется при создании и 
редактировании + resource_limits { + #кол-во используемых ядер cpu + #необязательный параметр + #тип - число + #если установлена -1 - кол-во неограничено + cu_c = 2 + + #кол-во используемой RAM в МБ + #необязательный параметр + #тип - число + #если установлена -1 - кол-во неограничено + cu_m = 1024 + + #размер дисков, в ГБ + #необязательный параметр + #тип - число + #если установлена -1 - размер неограничен + cu_dm = 23 + + #кол-во используемых публичных IP + #необязательный параметр + #тип - число + #если установлена -1 - кол-во неограничено + cu_i = 2 + + #ограничения на кол-во передачи данных, в ГБ + #необязательный параметр + #тип - число + #если установлена -1 - кол-во неограничено + cu_np = 2 + + #кол-во графических процессоров + #необязательный параметр + #тип - число + #если установлена -1 - кол-во неограничено + gpu_units = 2 + } + + #добавление/редактирование/удаление пользователей, к которым привязан аккаунт + #необязательный параметр + #тип - объект, кол-во таких объектов не ограничено + /*users { + #id пользователя + #обязательный параметр + #тип - строка + user_id = "username_2@decs3o" + + #тип доступа пользователя + #обязательный параметр + #тип - строка + #возможные параметры: + #R - чтение + #RCX - запись + #ARCXDU - админ + access_type = "R" + + #рекурсивное удаление пользователя из всех ресурсов аккаунтов + #необязательный параметр + #тип - булев тип + #по-умолчанию - false + #применяется при удалении пользователя из аккаунта + recursive_delete = true + } + */ + + #параметр распределения CPU + #необязательный параметр + #тип - строка + #если "strict" виртуальная машина не может быть запущена из-за нехватки ресурсов. + #"loose" позволяет запускать виртуальную машину, если недостаточно ресурсов. 
+ #cpu_allocation_parameter = "strict" + + #параметр коэффициента распределения CPU + #необязательный параметр + #тип - число + #один pCPU = коэффициент*vCPU (принимает нулевое или положительное значение) + #cpu_allocation_ratio = 1 + + #мгновеное удаление аккаунта, если да - то аккаунт невозможно будет восстановить + #необязательный параметр + #тип - булев тип + #используется при удалении аккаунта + #по-умолчанию - false + #permanently = true +} + +output "test" { + value = decort_cb_account.acc +} diff --git a/samples/cloudbroker/resource_extnet/main.tf b/samples/cloudbroker/resource_extnet/main.tf index 3a7051f..95baf3c 100644 --- a/samples/cloudbroker/resource_extnet/main.tf +++ b/samples/cloudbroker/resource_extnet/main.tf @@ -46,18 +46,64 @@ resource "decort_cb_extnet" "new_extnet" { # Тип - число vlan_id = 111 + # IP-адрес шлюза внешней сети + # Опциональный параметр + # Тип - строка + gateway = "test desc" + # Список DNS адресов # Опциональный параметр # Тип - массив строк dns = ["8.8.8.8", "9.9.9.9"] + # Список NTP адресов + # Опциональный параметр + # Тип - массив строк + ntp = ["ntp0.ntp-servers.net", "ntp1.ntp-servers.net"] + + # IP-адреса для проверки доступности сети + # Опциональный параметр + # Тип - массив строк + check_ips = ["191.255.0.0,", "191.255.0.0,"] + + # Если true - DHCP-сервер создан не будет + # Опциональный параметр + # Тип - булевое + virtual = true + # Описание # Опциональный параметр # Тип - строка desc = "test desc" + # Начало диапазона IP-адресов + # Опциональный параметр + # Тип - строка + start_ip = "191.255.0.0" + + # Окончание диапазона IP-адресов + # Опциональный параметр + # Тип - строка + end_ip = "191.255.0.40" + + # IP с которым будет создан vnfdev + # Опциональный параметр + # Тип - строка + vnfdev_ip = "191.255.0.20" + + # Количество предварительно созданных бронирований + # Опциональный параметр + # Тип - число + # По-умолчанию значение 128 + pre_reservations_num = "128" + + # Имя bridge Openvswitch для подключения к 
внешней сети + # Опциональный параметр + # Тип - строка + ovs_bridge = "some" + # Включение/Выключение внешней сети - # Опциональный параметр (default = true) + # Опциональный параметр # Тип - булев enable = true diff --git a/samples/cloudbroker/resource_extnet_static_route/main.tf b/samples/cloudbroker/resource_extnet_static_route/main.tf new file mode 100644 index 0000000..289eb45 --- /dev/null +++ b/samples/cloudbroker/resource_extnet_static_route/main.tf @@ -0,0 +1,66 @@ +/* +Пример использования +Ресурса extnet static routes +Ресурс позволяет: +1. Создавать static routes +2. Удалять static routes +3. Получать информацию о всех static routes в данном extnet +4. Предоставлять доступ виртуальным машинам к static routes +5. Удалять доступ виртуальным машинам к static routes + +*/ + +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "" + source = "basis/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://mr4.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +resource "decort_cb_extnet_static_route" "sr" { + +#id extnet +#обязательный параметр +#тип - число +extnet_id = 1111 + +# destination network +# обязательный параметр +#тип - строка +destination = "192.168.201.0" + +# destination network mask +#обязательный параметр +#тип - строка +netmask = "255.255.255.255" + +# IP-адрес из пула свободных IP-адресов extnet ID +#обязательный параметр +#тип - строка +gateway = "192.168.201.40" + +# список виртуальных машин, которым будет предоставлен доступ к роуту +#опциональный параметр +#тип - массив чисел +compute_ids = [111,222] +} + +output "sr" { + value = decort_cb_extnet_static_route.sr +} diff --git 
a/samples/cloudbroker/resource_pcidevice/main.tf b/samples/cloudbroker/resource_pcidevice/main.tf index fb45f4e..dcebc72 100644 --- a/samples/cloudbroker/resource_pcidevice/main.tf +++ b/samples/cloudbroker/resource_pcidevice/main.tf @@ -32,7 +32,7 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_pcidevice" "pd" { +resource "decort_cb_pcidevice" "pd" { #имя устройства #обязательный параметр #тип - строка @@ -44,7 +44,7 @@ resource "decort_pcidevice" "pd" { hw_path = "0000:01:00.0" #описание устройства - #обязательный параметр + #необязательный параметр #тип - строка description = "test desc" @@ -64,6 +64,12 @@ resource "decort_pcidevice" "pd" { #тип - булево значение #enable = false + #принудительное отключение устройства + #опциональный параметр + #может использоваться на созданном ресурсе + #тип - булево значение + #force = true + #принудительное удаление устройства #опциональный параметр #используется при удалении ресурса @@ -81,5 +87,5 @@ resource "decort_pcidevice" "pd" { } output "test" { - value = decort_pcidevice.pd + value = decort_cb_pcidevice.pd }