main 4.5.2
loskutovanl 1 year ago
parent 20050bc169
commit 665a6c9fd8

@@ -1,24 +1,36 @@
## Version 4.5.2
## Feature
- Refactoring BVS config
- Added and updated data sources and resources for cloudbroker groups:
  * account
  * audit
  * disks
  * extnet
  * flipgroup
  * grid
  * image
  * k8ci
  * k8s
  * kvmvm (compute)
  * lb (load balancer)
  * pcidevice
  * rg (resource group)
  * sep
  * stack
  * vins
### Bugfix
- Added resource_limits.cu_dm float field for account, account_consumed_units and account_reserved_units data sources in cloudapi/account
- Fixed restore of the account resource in cloudapi/account; the restore is performed when the restore parameter is set to true
- Fixed restore of the bservice resource in cloudapi/bservice; the restore is performed only once, when the restore parameter is set to true
- Fixed enabling of the bservice resource in cloudapi/bservice; it is enabled when the enable parameter is set to true
- Fixed Terraform state update when updating the bservice resource in cloudapi/bservice
- Fixed Terraform state update when updating the bservice_group resource in cloudapi/bservice
- Added entry_count field for bservice_snapshot_list data source in cloudapi/bservice
- Added items.shareable boolean field for disk_list_unattached data source in cloudapi/disks
- Added items.status string field for extnet_list data source in cloudapi/extnet
- Fixed multiple minor typos in samples/cloudapi
- Added account_name string, created_by string, created_time int, deleted_by string, deleted_time int, network string, rg_id int, rg_name string, updated_by string, updated_time int fields for flipgroup data source and resource in cloudapi/flipgroup
- Removed net_mask string and ckey string fields from flipgroup data source and resource in cloudapi/flipgroup
- Added meta list field for flipgroup_list data source in cloudapi/flipgroup
- Fixed net_mask string field for flipgroup_list data source in cloudapi/flipgroup
- Fixed id string field for image data source in cloudapi/image
- Fixed account_name string field for k8s_list data source in cloudapi/k8s
- Added network_plugin string field for k8s and k8s_list data sources in cloudapi/k8s
- Fixed Terraform state update for k8s, k8s_cp and k8s_wg resources in cloudapi/k8s (see the read/state sketch after this list)
- Added image_name string and interfaces.enabled boolean fields for compute data source in cloudapi/kvmvm
- Added the interfaces field to the creation request in cloudapi/kvmvm
- Fixed restore of the kvmvm resource in cloudapi/kvmvm; the restore is performed when the restore parameter is set to true
- Changed the network connection logic: only the first network is attached while the kvmvm is stopped; subsequent networks are attached without stopping it
- Removed token retrieval from the controller
- Added start/stop handling after create for lb in cloudapi/lb
- Fixed restore of the lb resource in cloudapi/lb; the restore is performed when the restore parameter is set to true
- Added the auth_broker field to the location list data source in cloudapi/location
- Added cpu_allocation_parameter and cpu_allocation_ratio fields in data source rg list lb deleted in cloudapi/rg
- Added backend_haip and frontend_haip fields in data source rg list in cloudapi/rg
- Changed the type of the Ram field in the QuotaRecord struct in cloudapi/rg/models
- Added a state upgrader for rg in cloudapi/rg
- Added the owner field to the creation request in cloudapi/rg
- Fixed restore of the rg resource in cloudapi/rg; the restore is performed when the restore parameter is set to true
- Added enabled and routes fields in data source vins in cloudapi/vins
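
Several of the bugfix entries above concern how data sources and resources record their results in Terraform state. The account data source diffs later in this commit all follow the same read pattern: on a failed read the resource ID is cleared so stale state is not kept; otherwise a fresh UUID is set as the ID and the response fields are written with d.Set. Below is a condensed, illustrative sketch of that pattern; the exampleAPIList type and the helper names are placeholders, not functions from the provider or the decort-golang-sdk.

```go
package example

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleAPIList is a placeholder for an SDK list response.
type exampleAPIList struct {
	Data       []map[string]interface{}
	EntryCount uint64
}

// placeholder for the utility*CheckPresence helpers that call the cloud API
func utilityExampleListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*exampleAPIList, error) {
	return &exampleAPIList{}, nil
}

func dataSourceExampleListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	list, err := utilityExampleListCheckPresence(ctx, d, m)
	if err != nil {
		// clear the ID so a failed read does not leave a half-populated entry in state
		d.SetId("")
		return diag.FromErr(err)
	}

	// data sources in this provider use a fresh UUID as their state ID
	d.SetId(uuid.New().String())
	d.Set("items", list.Data)
	d.Set("entry_count", list.EntryCount)

	return nil
}
```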

@@ -7,7 +7,7 @@ ZIPDIR = ./zip
BINARY=${NAME}
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.5.2
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
FILES = ${BINARY}_${VERSION}_darwin_amd64\

@@ -3,13 +3,12 @@ module repository.basistech.ru/BASIS/terraform-provider-decort
go 1.18

require (
	github.com/golang-jwt/jwt/v4 v4.4.3
	github.com/google/uuid v1.3.0
	github.com/hashicorp/terraform-plugin-docs v0.13.0
	github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
	github.com/sirupsen/logrus v1.9.0
	golang.org/x/net v0.16.0
	repository.basistech.ru/BASIS/decort-golang-sdk v1.7.3
)

require (

@@ -58,8 +58,6 @@ github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91
github.com/go-playground/validator/v10 v10.15.4 h1:zMXza4EpOdooxPel5xDqXEdXG5r+WggpvnAKMsalBjs=
github.com/go-playground/validator/v10 v10.15.4/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -339,5 +337,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.3 h1:NtvW72WsAezk0XYKE5+ag+xauIgKWKcbKLy7YTp5Fuc=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.3/go.mod h1:7fj8sgGZFiiExewQeqckCS4WxwOmU0oP6BO6mi1Lpkw=

@@ -21,7 +21,6 @@ limitations under the License.

package controller

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"io"
@@ -38,8 +37,6 @@ import (
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker"

	jwt "github.com/golang-jwt/jwt/v4"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
@@ -204,27 +201,27 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
	case MODE_DECS3O:
		// on success getDECS3OJWT will set config.jwt to the obtained JWT, so there is no
		// need to set it once again here
		// _, err := ret_config.getDECS3OJWT()
		// if err != nil {
		// return nil, err
		// }
		// we are not verifying the JWT when parsing because actual verification is done on the
		// OVC controller side. Here we do parsing solely to extract Oauth2 user name (claim "user")
		// and JWT issuer name (claim "iss")
		// parser := jwt.Parser{}
		// token, _, err := parser.ParseUnverified(ret_config.jwt, jwt.MapClaims{})
		// if err != nil {
		// return nil, err
		// }
		// if claims, ok := token.Claims.(jwt.MapClaims); ok {
		// var tbuf bytes.Buffer
		// tbuf.WriteString(claims["username"].(string))
		// tbuf.WriteString("@")
		// tbuf.WriteString(claims["iss"].(string))
		// ret_config.decort_username = tbuf.String()
		// } else {
		// return nil, fmt.Errorf("failed to extract user and iss fields from JWT token in oauth2 mode")
		// }

		sdkConf := config.Config{
			AppID: ret_config.app_id,
@@ -263,57 +260,57 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
	return ret_config, nil
}

// func (config *ControllerCfg) GetDecortUsername() string {
// return config.decort_username
// }

// func (config *ControllerCfg) getDECS3OJWT() (string, error) {
// // Obtain JWT from the Oauth2 provider using application ID and application secret provided in config.
// if config.auth_mode_code == MODE_UNDEF {
// return "", fmt.Errorf("getOAuth2JWT method called for undefined authorization mode")
// }
// if config.auth_mode_code != MODE_DECS3O {
// return "", fmt.Errorf("getOAuth2JWT method called for incompatible authorization mode %q", config.auth_mode_txt)
// }
// params := url.Values{}
// params.Add("grant_type", "client_credentials")
// params.Add("client_id", config.app_id)
// params.Add("client_secret", config.app_secret)
// params.Add("response_type", "id_token")
// params.Add("validity", "3600")
// params_str := params.Encode()
// req, err := http.NewRequest("POST", config.oauth2_url+"/v1/oauth/access_token", strings.NewReader(params_str))
// if err != nil {
// return "", err
// }
// req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// req.Header.Set("Content-Length", strconv.Itoa(len(params_str)))
// resp, err := config.cc_client.Do(req)
// if err != nil {
// return "", err
// }
// if resp.StatusCode != http.StatusOK {
// // fmt.Println("response Status:", resp.Status)
// // fmt.Println("response Headers:", resp.Header)
// // fmt.Println("response Headers:", req.URL)
// return "", fmt.Errorf("getOauth2JWT: unexpected status code %d when obtaining JWT from %q for APP_ID %q, request Body %q",
// resp.StatusCode, req.URL, config.app_id, params_str)
// }
// defer resp.Body.Close()
// responseData, err := io.ReadAll(resp.Body)
// if err != nil {
// return "", err
// }
// // validation successful - store JWT in the corresponding field of the ControllerCfg structure
// config.jwt = strings.TrimSpace(string(responseData))
// return config.jwt, nil
// }

func (config *ControllerCfg) validateJWT(jwt string) (bool, error) {
	/*

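The DECS3O token logic above is now commented out: the controller no longer fetches or parses the JWT itself, which matches the changelog note that token retrieval was removed from the controller. For anyone who relied on the old "user@issuer" value from GetDecortUsername, the removed parsing amounted to the following. This is a standalone sketch of that removed logic using the same golang-jwt calls; it is not code that remains in the provider.

```go
package example

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

// oauth2Username parses the JWT without verifying it (verification happens on
// the controller side) and returns "<user>@<issuer>" from the claims, mirroring
// what the commented-out controller code used to do.
func oauth2Username(tokenString string) (string, error) {
	parser := jwt.Parser{}
	token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		return "", err
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		return "", fmt.Errorf("failed to extract user and iss fields from JWT token")
	}
	user, _ := claims["username"].(string)
	iss, _ := claims["iss"].(string)
	return user + "@" + iss, nil
}
```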
@@ -45,6 +45,7 @@ import (
func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	acc, err := utilityAccountCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -99,6 +100,10 @@ func resourceLimitsSchemaMake() map[string]*schema.Schema {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_dm": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_i": {
			Type:     schema.TypeFloat,
			Computed: true,
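
The new cu_dm unit also has to be copied out of the SDK response when the resource_limits block is flattened into state. A hedged sketch of what that mapping typically looks like; the resourceLimitsSketch type and its field names are assumptions modelled on the CUDM field used by the consumed-units data source below, not verified SDK definitions.

```go
package example

// resourceLimitsSketch is a placeholder mirroring the SDK's resource limits
// record; the field names here (CUDM in particular) are assumptions.
type resourceLimitsSketch struct {
	CUC, CUD, CUDM, CUI, CUM, CUNP, GPUUnits float64
}

// flattenResourceLimitsSketch shows how the flattened map would gain the new key.
func flattenResourceLimitsSketch(rl resourceLimitsSketch) []map[string]interface{} {
	return []map[string]interface{}{{
		"cu_c":      rl.CUC,
		"cu_d":      rl.CUD,
		"cu_dm":     rl.CUDM, // new in 4.5.2
		"cu_i":      rl.CUI,
		"cu_m":      rl.CUM,
		"cu_np":     rl.CUNP,
		"gpu_units": rl.GPUUnits,
	}}
}
```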

@@ -1,127 +1,128 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, aa := range aal {
		temp := map[string]interface{}{
			"call":         aa.Call,
			"responsetime": aa.ResponseTime,
			"statuscode":   aa.StatusCode,
			"timestamp":    aa.Timestamp,
			"user":         aa.User,
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenAccountAuditsList(accountAuditsList))

	return nil
}

func dataSourceAccountAuditsListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of the account",
		},
		"items": {
			Type:        schema.TypeList,
			Computed:    true,
			Description: "Search Result",
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"call": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"responsetime": {
						Type:     schema.TypeFloat,
						Computed: true,
					},
					"statuscode": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"timestamp": {
						Type:     schema.TypeFloat,
						Computed: true,
					},
					"user": {
						Type:     schema.TypeString,
						Computed: true,
					},
				},
			},
		},
	}
	return res
}

func DataSourceAccountAuditsList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountAuditsListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountAuditsListSchemaMake(),
	}
}
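
Each of these DataSource... constructors is wired into the provider's schema map so Terraform can address it by name. A minimal sketch of how such a registration typically looks with terraform-plugin-sdk v2 follows; the decort_account_audits_list name, the provider wiring, and the internal import path are assumptions for illustration and are not shown in this commit.

```go
package provider

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	// assumed location of the account package shown above; not verified here
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
)

// newProvider sketches how a data source constructor is exposed to Terraform.
func newProvider() *schema.Provider {
	return &schema.Provider{
		DataSourcesMap: map[string]*schema.Resource{
			// data source name assumed for illustration
			"decort_account_audits_list": account.DataSourceAccountAuditsList(),
		},
	}
}
```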

@@ -1,257 +1,258 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountComputesList(acl *account.ListComputes) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, acc := range acl.Data {
		temp := map[string]interface{}{
			"account_id":       acc.AccountID,
			"account_name":     acc.AccountName,
			"cpus":             acc.CPUs,
			"created_by":       acc.CreatedBy,
			"created_time":     acc.CreatedTime,
			"deleted_by":       acc.DeletedBy,
			"deleted_time":     acc.DeletedTime,
			"compute_id":       acc.ComputeID,
			"compute_name":     acc.ComputeName,
			"ram":              acc.RAM,
			"registered":       acc.Registered,
			"rg_id":            acc.RGID,
			"rg_name":          acc.RGName,
			"status":           acc.Status,
			"tech_status":      acc.TechStatus,
			"total_disks_size": acc.TotalDisksSize,
			"updated_by":       acc.UpdatedBy,
			"updated_time":     acc.UpdatedTime,
			"user_managed":     acc.UserManaged,
			"vins_connected":   acc.VINSConnected,
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenAccountComputesList(accountComputesList))
	d.Set("entry_count", accountComputesList.EntryCount)

	return nil
}

func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of the account",
		},
		"compute_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Filter by compute ID",
		},
		"name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by compute name",
		},
		"rg_name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by RG name",
		},
		"rg_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Filter by RG ID",
		},
		"tech_status": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by tech. status",
		},
		"ip_address": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by IP address",
		},
		"extnet_name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by extnet name",
		},
		"extnet_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Filter by extnet ID",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page number",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page size",
		},
		"items": {
			Type:        schema.TypeList,
			Computed:    true,
			Description: "Search Result",
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"account_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"account_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"cpus": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"created_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"created_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"deleted_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"deleted_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"compute_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"compute_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"ram": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"registered": {
						Type:     schema.TypeBool,
						Computed: true,
					},
					"rg_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"rg_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"status": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"tech_status": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"total_disks_size": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"updated_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"updated_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"user_managed": {
						Type:     schema.TypeBool,
						Computed: true,
					},
					"vins_connected": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
		"entry_count": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
	return res
}

func DataSourceAccountComputesList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountComputesListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountComputesListSchemaMake(),
	}
}

@@ -1,110 +1,116 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAccountConsumedUnitsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	accountConsumedUnits, err := utilityAccountConsumedUnitsCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("cu_c", accountConsumedUnits.CUC)
	d.Set("cu_d", accountConsumedUnits.CUD)
	d.Set("cu_dm", accountConsumedUnits.CUDM)
	d.Set("cu_i", accountConsumedUnits.CUI)
	d.Set("cu_m", accountConsumedUnits.CUM)
	d.Set("cu_np", accountConsumedUnits.CUNP)
	d.Set("gpu_units", accountConsumedUnits.GPUUnits)

	return nil
}

func dataSourceAccountConsumedUnitsSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of the account",
		},
		"cu_c": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_d": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_dm": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_i": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_m": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"cu_np": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
		"gpu_units": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
	}
	return res
}

func DataSourceAccountConsumedUnits() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountConsumedUnitsRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountConsumedUnitsSchemaMake(),
	}
}

@@ -1,90 +1,91 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAccountConsumedUnitsByTypeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	result, err := utilityAccountConsumedUnitsByTypeCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("cu_result", result)

	return nil
}

func dataSourceAccountConsumedUnitsByTypeSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of the account",
		},
		"cu_type": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "cloud unit resource type",
		},
		"cu_result": {
			Type:     schema.TypeFloat,
			Computed: true,
		},
	}
	return res
}

func DataSourceAccountConsumedUnitsByType() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountConsumedUnitsByTypeRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountConsumedUnitsByTypeSchemaMake(),
	}
}

@@ -1,77 +1,78 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenAccountList(accountDeletedList))
	d.Set("entry_count", accountDeletedList.EntryCount)

	return nil
}

func dataSourceAccountDeletedListSchemaMake() map[string]*schema.Schema {
	temp := dataSourceAccountListSchemaMake()
	delete(temp, "status")
	return temp
}

func DataSourceAccountDeletedList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountDeletedListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountDeletedListSchemaMake(),
	}
}

@@ -1,171 +1,172 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, ad := range adl.Data {
		temp := map[string]interface{}{
			"disk_id":   ad.ID,
			"disk_name": ad.Name,
			"pool":      ad.Pool,
			"sep_id":    ad.SEPID,
			"shareable": ad.Shareable,
			"size_max":  ad.SizeMax,
			"type":      ad.Type,
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenAccountDisksList(accountDisksList))
	d.Set("entry_count", accountDisksList.EntryCount)

	return nil
}

func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of the account",
		},
		"disk_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Filter by disk ID",
		},
		"name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by disk name",
		},
		"disk_max_size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Filter by disk max size",
		},
		"type": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by disk type",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page number",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page size",
		},
		"items": {
			Type:        schema.TypeList,
			Computed:    true,
			Description: "Search Result",
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"disk_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"disk_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"pool": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"sep_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"shareable": {
						Type:     schema.TypeBool,
						Computed: true,
					},
					"size_max": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"type": {
						Type:     schema.TypeString,
						Computed: true,
					},
				},
			},
		},
		"entry_count": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
	return res
}

func DataSourceAccountDisksList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceAccountDisksListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceAccountDisksListSchemaMake(),
	}
}

@@ -1,252 +1,253 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  for _, afg := range afgl.Data {
    temp := map[string]interface{}{
      "account_id":   afg.AccountID,
      "client_type":  afg.ClientType,
      "conn_type":    afg.ConnType,
      "created_by":   afg.CreatedBy,
      "created_time": afg.CreatedTime,
      "default_gw":   afg.DefaultGW,
      "deleted_by":   afg.DeletedBy,
      "deleted_time": afg.DeletedTime,
      "desc":         afg.Description,
      "gid":          afg.GID,
      "guid":         afg.GUID,
      "fg_id":        afg.ID,
      "ip":           afg.IP,
      "milestones":   afg.Milestones,
      "fg_name":      afg.Name,
      "net_id":       afg.NetID,
      "net_type":     afg.NetType,
      "netmask":      afg.NetMask,
      "status":       afg.Status,
      "updated_by":   afg.UpdatedBy,
      "updated_time": afg.UpdatedTime,
    }
    res = append(res, temp)
  }
  return res
}

func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList))
  d.Set("entry_count", accountFlipGroupsList.EntryCount)

  return nil
}

func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "account_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "ID of the account",
    },
    "name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by name",
    },
    "vins_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by ViNS ID",
    },
    "vins_name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by ViNS name",
    },
    "extnet_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by extnet ID",
    },
    "by_ip": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by IP",
    },
    "flipgroup_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by flipgroup ID",
    },
    "page": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page number",
    },
    "size": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page size",
    },
    "items": {
      Type:        schema.TypeList,
      Computed:    true,
      Description: "Search Result",
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "client_type": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "conn_type": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "created_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "default_gw": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "deleted_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "desc": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "gid": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "guid": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "fg_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "ip": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "milestones": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "fg_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "net_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "net_type": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "netmask": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "status": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
        },
      },
    },
    "entry_count": {
      Type:     schema.TypeInt,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountFlipGroupsList() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountFlipGroupsListRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountFlipGroupsListSchemaMake(),
  }
}
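For orientation, a data source built this way only becomes visible to Terraform once it is registered in the provider's DataSourcesMap. A minimal sketch, where both the import path and the "decort_account_flipgroups_list" map key are assumptions made for illustration (the real registration lives elsewhere in this repository):

package provider

import (
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

  account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
)

// newProvider sketches how DataSourceAccountFlipGroupsList could be wired into
// the plugin; path and key above are assumptions, not the repository's actual values.
func newProvider() *schema.Provider {
  return &schema.Provider{
    DataSourcesMap: map[string]*schema.Resource{
      "decort_account_flipgroups_list": account.DataSourceAccountFlipGroupsList(),
    },
  }
}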

@@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }
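The same one-line change recurs across this commit: each data source read clears its ID before returning a failed presence check, so a record that can no longer be read is dropped from state instead of lingering with stale attributes. A minimal sketch of the pattern, with a hypothetical utilityExampleCheckPresence helper standing in for the real utility*CheckPresence functions:

package account

import (
  "context"
  "fmt"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// utilityExampleCheckPresence is a hypothetical stand-in for the real
// presence-check helpers; it exists only to keep this sketch self-contained.
func utilityExampleCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
  return "", fmt.Errorf("record not found")
}

// dataSourceExampleRead shows the error-handling shape applied to the account
// data sources in this commit: clear the ID on a failed read, then report the error.
func dataSourceExampleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  rec, err := utilityExampleCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("") // remove the stale entry from state
    return diag.FromErr(err)
  }

  d.SetId(uuid.New().String())
  d.Set("value", rec)
  return nil
}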

@@ -1,209 +1,210 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  for _, acc := range al.Data {
    temp := map[string]interface{}{
      "acl":          flattenRgAcl(acc.ACL),
      "created_time": acc.CreatedTime,
      "deleted_time": acc.DeletedTime,
      "account_id":   acc.ID,
      "account_name": acc.Name,
      "status":       acc.Status,
      "updated_time": acc.UpdatedTime,
    }
    res = append(res, temp)
  }
  return res
}

func flattenRgAcl(rgAcls []account.RecordACL) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  for _, rgAcl := range rgAcls {
    temp := map[string]interface{}{
      "explicit":      rgAcl.IsExplicit,
      "guid":          rgAcl.GUID,
      "right":         rgAcl.Rights,
      "status":        rgAcl.Status,
      "type":          rgAcl.Type,
      "user_group_id": rgAcl.UgroupID,
    }
    res = append(res, temp)
  }
  return res
}

func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountList, err := utilityAccountListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("items", flattenAccountList(accountList))
  d.Set("entry_count", accountList.EntryCount)

  return nil
}

func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "by_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by ID",
    },
    "name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by name",
    },
    "acl": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by ACL",
    },
    "status": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by status",
    },
    "page": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page number",
    },
    "size": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page size",
    },
    "items": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "acl": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
              Schema: map[string]*schema.Schema{
                "explicit": {
                  Type:     schema.TypeBool,
                  Computed: true,
                },
                "guid": {
                  Type:     schema.TypeString,
                  Computed: true,
                },
                "right": {
                  Type:     schema.TypeString,
                  Computed: true,
                },
                "status": {
                  Type:     schema.TypeString,
                  Computed: true,
                },
                "type": {
                  Type:     schema.TypeString,
                  Computed: true,
                },
                "user_group_id": {
                  Type:     schema.TypeString,
                  Computed: true,
                },
              },
            },
          },
          "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "account_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "status": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
        },
      },
    },
    "entry_count": {
      Type:     schema.TypeInt,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountList() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountListRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountListSchemaMake(),
  }
}

@@ -1,110 +1,116 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAccountReservedUnitsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountReservedUnits, err := utilityAccountReservedUnitsCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("cu_c", accountReservedUnits.CUC)
  d.Set("cu_d", accountReservedUnits.CUD)
  d.Set("cu_dm", accountReservedUnits.CUDM)
  d.Set("cu_i", accountReservedUnits.CUI)
  d.Set("cu_m", accountReservedUnits.CUM)
  d.Set("cu_np", accountReservedUnits.CUNP)
  d.Set("gpu_units", accountReservedUnits.GPUUnits)

  return nil
}

func dataSourceAccountReservedUnitsSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "account_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "ID of the account",
    },
    "cu_c": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "cu_d": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "cu_dm": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "cu_i": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "cu_m": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "cu_np": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
    "gpu_units": {
      Type:     schema.TypeFloat,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountReservedUnits() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountReservedUnitsRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountReservedUnitsSchemaMake(),
  }
}
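The reserved-units data source above gains the new cu_dm value next to the existing cu_* fields. A quick sanity check that the schema declares the new attribute can be sketched as an ordinary Go test in the same package (the test is illustrative and not part of this commit):

package account

import "testing"

// TestReservedUnitsSchemaHasCuDM only verifies that the newly added cu_dm
// attribute is present and computed in the reserved units schema.
func TestReservedUnitsSchemaHasCuDM(t *testing.T) {
  s := dataSourceAccountReservedUnitsSchemaMake()
  cuDM, ok := s["cu_dm"]
  if !ok {
    t.Fatal("expected cu_dm to be declared in the reserved units schema")
  }
  if !cuDM.Computed {
    t.Error("expected cu_dm to be a computed attribute")
  }
}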

@@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

@@ -1,371 +1,372 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountRGList(argl *account.ListRG) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  for _, arg := range argl.Data {
    temp := map[string]interface{}{
      "computes":     flattenAccRGComputes(arg.Computes),
      "resources":    flattenAccRGResources(arg.Resources),
      "created_by":   arg.CreatedBy,
      "created_time": arg.CreatedTime,
      "deleted_by":   arg.DeletedBy,
      "deleted_time": arg.DeletedTime,
      "rg_id":        arg.RGID,
      "milestones":   arg.Milestones,
      "rg_name":      arg.RGName,
      "status":       arg.Status,
      "updated_by":   arg.UpdatedBy,
      "updated_time": arg.UpdatedTime,
      "vinses":       arg.VINSes,
    }
    res = append(res, temp)
  }
  return res
}

func flattenAccRGComputes(argc account.RGComputes) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  temp := map[string]interface{}{
    "started": argc.Started,
    "stopped": argc.Stopped,
  }
  res = append(res, temp)
  return res
}

func flattenAccResourceHack(r account.LimitsRG) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  temp := map[string]interface{}{
    "cpu":        r.CPU,
    "disksize":   r.DiskSize,
    "extips":     r.ExtIPs,
    "exttraffic": r.ExtTraffic,
    "gpu":        r.GPU,
    "ram":        r.RAM,
    //"seps": flattenAccountSeps(r.SEPs),
  }
  res = append(res, temp)
  return res
}

func flattenAccResourceRg(r account.Resource) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  temp := map[string]interface{}{
    "cpu":        r.CPU,
    "disksize":   r.DiskSize,
    "extips":     r.ExtIPs,
    "exttraffic": r.ExtTraffic,
    "gpu":        r.GPU,
    "ram":        r.RAM,
  }
  res = append(res, temp)
  return res
}

func flattenAccRGResources(argr account.RGResources) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  temp := map[string]interface{}{
    "consumed": flattenAccResourceRg(argr.Consumed),
    "limits":   flattenAccResourceHack(argr.Limits),
    "reserved": flattenAccResourceRg(argr.Reserved),
  }
  res = append(res, temp)
  return res
}

func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("items", flattenAccountRGList(accountRGList))
  d.Set("entry_count", accountRGList.EntryCount)

  return nil
}

func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "account_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "ID of the account",
    },
    "page": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page number",
    },
    "size": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page size",
    },
    "rg_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by RG ID",
    },
    "vins_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by ViNS ID",
    },
    "vm_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by VM ID",
    },
    "name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by name",
    },
    "status": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by status",
    },
    "items": {
      Type:        schema.TypeList,
      Computed:    true,
      Description: "Search Result",
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "computes": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
              Schema: map[string]*schema.Schema{
                "started": {
                  Type:     schema.TypeInt,
                  Computed: true,
                },
                "stopped": {
                  Type:     schema.TypeInt,
                  Computed: true,
                },
              },
            },
          },
          "resources": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
              Schema: map[string]*schema.Schema{
                "consumed": {
                  Type:     schema.TypeList,
                  Computed: true,
                  Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                      "cpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "disksize": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "extips": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "exttraffic": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "gpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "ram": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                    },
                  },
                },
                "limits": {
                  Type:     schema.TypeList,
                  Computed: true,
                  Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                      "cpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "disksize": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "extips": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "exttraffic": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "gpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "ram": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                    },
                  },
                },
                "reserved": {
                  Type:     schema.TypeList,
                  Computed: true,
                  Elem: &schema.Resource{
                    Schema: map[string]*schema.Schema{
                      "cpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "disksize": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "extips": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "exttraffic": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "gpu": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                      "ram": {
                        Type:     schema.TypeInt,
                        Computed: true,
                      },
                    },
                  },
                },
              },
            },
          },
          "created_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "deleted_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "rg_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "milestones": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "rg_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "status": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "vinses": {
            Type:     schema.TypeInt,
            Computed: true,
          },
        },
      },
    },
    "entry_count": {
      Type:     schema.TypeInt,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountRGList() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountRGListRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountRGListSchemaMake(),
  }
}
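The flatteners above are plain functions over SDK structs, so they are easy to exercise in isolation. A sketch of such a check, assuming only the Started and Stopped fields that flattenAccRGComputes already uses (the test is illustrative and not part of this commit):

package account

import (
  "testing"

  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
)

// TestFlattenAccRGComputes checks that the computes flattener produces a single
// map carrying the started/stopped counters.
func TestFlattenAccRGComputes(t *testing.T) {
  got := flattenAccRGComputes(account.RGComputes{Started: 2, Stopped: 1})
  if len(got) != 1 {
    t.Fatalf("expected one flattened entry, got %d", len(got))
  }
  if _, ok := got[0]["started"]; !ok {
    t.Error("expected a started key in the flattened computes entry")
  }
  if _, ok := got[0]["stopped"]; !ok {
    t.Error("expected a stopped key in the flattened computes entry")
  }
}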

@@ -1,185 +1,186 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountTemplatesList(atl *account.ListTemplates) []map[string]interface{} {
  res := make([]map[string]interface{}, 0, len(atl.Data))
  for _, at := range atl.Data {
    temp := map[string]interface{}{
      "unc_path":      at.UNCPath,
      "account_id":    at.AccountID,
      "desc":          at.Description,
      "template_id":   at.ID,
      "template_name": at.Name,
      "public":        at.Public,
      "size":          at.Size,
      "status":        at.Status,
      "type":          at.Type,
      "username":      at.Username,
    }
    res = append(res, temp)
  }
  return res
}

func dataSourceAccountTemplatesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountTemplatesList, err := utilityAccountTemplatesListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("items", flattenAccountTemplatesList(accountTemplatesList))
  d.Set("entry_count", accountTemplatesList.EntryCount)
  return nil
}

func dataSourceAccountTemplatesListSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "account_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "ID of the account",
    },
    "include_deleted": {
      Type:     schema.TypeBool,
      Optional: true,
    },
    "image_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Find by image id",
    },
    "name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by name",
    },
    "type": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by type",
    },
    "page": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page number",
    },
    "size": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page size",
    },
    "items": {
      Type:        schema.TypeList,
      Computed:    true,
      Description: "Search Result",
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "unc_path": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "desc": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "template_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "template_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "public": {
            Type:     schema.TypeBool,
            Computed: true,
          },
          "size": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "status": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "type": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "username": {
            Type:     schema.TypeString,
            Computed: true,
          },
        },
      },
    },
    "entry_count": {
      Type:     schema.TypeInt,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountTemplatessList() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountTemplatesListRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountTemplatesListSchemaMake(),
  }
}

@@ -1,222 +1,223 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package account

import (
  "context"

  "github.com/google/uuid"
  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} {
  res := make([]map[string]interface{}, 0)
  for _, av := range avl.Data {
    temp := map[string]interface{}{
      "account_id":     av.AccountID,
      "account_name":   av.AccountName,
      "computes":       av.Computes,
      "created_by":     av.CreatedBy,
      "created_time":   av.CreatedTime,
      "deleted_by":     av.DeletedBy,
      "deleted_time":   av.DeletedTime,
      "external_ip":    av.ExternalIP,
      "vin_id":         av.ID,
      "vin_name":       av.Name,
      "network":        av.Network,
      "pri_vnf_dev_id": av.PriVNFDevID,
      "rg_id":          av.RGID,
      "rg_name":        av.RGName,
      "status":         av.Status,
      "updated_by":     av.UpdatedBy,
      "updated_time":   av.UpdatedTime,
    }
    res = append(res, temp)
  }
  return res
}

func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  id := uuid.New()
  d.SetId(id.String())
  d.Set("items", flattenAccountVinsList(accountVinsList))
  d.Set("entry_count", accountVinsList.EntryCount)

  return nil
}

func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema {
  res := map[string]*schema.Schema{
    "account_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "ID of the account",
    },
    "vins_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by ViNS ID",
    },
    "name": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by name",
    },
    "rg_id": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Filter by RG ID",
    },
    "ext_ip": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Filter by external IP",
    },
    "page": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page number",
    },
    "size": {
      Type:        schema.TypeInt,
      Optional:    true,
      Description: "Page size",
    },
    "items": {
      Type:        schema.TypeList,
      Computed:    true,
      Description: "Search Result",
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "account_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "computes": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "created_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "deleted_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "external_ip": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "vin_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "vin_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "network": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "pri_vnf_dev_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "rg_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "rg_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "status": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_by": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "updated_time": {
            Type:     schema.TypeInt,
            Computed: true,
          },
        },
      },
    },
    "entry_count": {
      Type:     schema.TypeInt,
      Computed: true,
    },
  }
  return res
}

func DataSourceAccountVinsList() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceAccountVinsListRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceAccountVinsListSchemaMake(),
  }
}

File diff suppressed because it is too large.

@@ -1,297 +1,298 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package bservice

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceBasicServiceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	bs, err := utilityBasicServiceCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	d.SetId(strconv.FormatUint(bs.ID, 10))

	flattenService(d, bs)

	return nil
}

func dataSourceBasicServiceSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"service_id":   {Type: schema.TypeInt, Required: true},
		"account_id":   {Type: schema.TypeInt, Computed: true},
		"account_name": {Type: schema.TypeString, Computed: true},
		"base_domain":  {Type: schema.TypeString, Computed: true},
		"computes": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"account_id":     {Type: schema.TypeInt, Computed: true},
					"architecture":   {Type: schema.TypeString, Computed: true},
					"compgroup_id":   {Type: schema.TypeInt, Computed: true},
					"compgroup_name": {Type: schema.TypeString, Computed: true},
					"compgroup_role": {Type: schema.TypeString, Computed: true},
					"id":             {Type: schema.TypeInt, Computed: true},
					"rg_id":          {Type: schema.TypeInt, Computed: true},
					"stack_id":       {Type: schema.TypeInt, Computed: true},
					"name":           {Type: schema.TypeString, Computed: true},
					"status":         {Type: schema.TypeString, Computed: true},
					"tech_status":    {Type: schema.TypeString, Computed: true},
				},
			},
		},
		"cpu_total":    {Type: schema.TypeInt, Computed: true},
		"created_by":   {Type: schema.TypeString, Computed: true},
		"created_time": {Type: schema.TypeInt, Computed: true},
		"deleted_by":   {Type: schema.TypeString, Computed: true},
		"deleted_time": {Type: schema.TypeInt, Computed: true},
		"disk_total":   {Type: schema.TypeInt, Computed: true},
		"gid":          {Type: schema.TypeInt, Computed: true},
		"groups": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"computes":    {Type: schema.TypeInt, Computed: true},
					"consistency": {Type: schema.TypeBool, Computed: true},
					"id":          {Type: schema.TypeInt, Computed: true},
					"name":        {Type: schema.TypeString, Computed: true},
					"status":      {Type: schema.TypeString, Computed: true},
					"tech_status": {Type: schema.TypeString, Computed: true},
				},
			},
		},
		// "groups_name": {
		// 	Type:     schema.TypeList,
		// 	Computed: true,
		// 	Elem: &schema.Schema{
		// 		Type: schema.TypeString,
		// 	},
		// },
		"guid":            {Type: schema.TypeInt, Computed: true},
		"milestones":      {Type: schema.TypeInt, Computed: true},
		"service_name":    {Type: schema.TypeString, Computed: true},
		"parent_srv_id":   {Type: schema.TypeInt, Computed: true},
		"parent_srv_type": {Type: schema.TypeString, Computed: true},
		"ram_total":       {Type: schema.TypeInt, Computed: true},
		"rg_id":           {Type: schema.TypeInt, Computed: true},
		"rg_name":         {Type: schema.TypeString, Computed: true},
		"snapshots": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"guid":      {Type: schema.TypeString, Computed: true},
					"label":     {Type: schema.TypeString, Computed: true},
					"timestamp": {Type: schema.TypeInt, Computed: true},
					"valid":     {Type: schema.TypeBool, Computed: true},
				},
			},
		},
		"ssh_key":      {Type: schema.TypeString, Computed: true},
		"ssh_user":     {Type: schema.TypeString, Computed: true},
		"status":       {Type: schema.TypeString, Computed: true},
		"tech_status":  {Type: schema.TypeString, Computed: true},
		"updated_by":   {Type: schema.TypeString, Computed: true},
		"updated_time": {Type: schema.TypeInt, Computed: true},
		"user_managed": {Type: schema.TypeBool, Computed: true},
	}
	return res
}

func DataSourceBasicService() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceBasicServiceRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceBasicServiceSchemaMake(),
	}
}

@@ -1,84 +1,85 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package bservice

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceBasicServiceDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	basicServiceDeletedList, err := utilityBasicServiceDeletedListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenBasicServiceList(basicServiceDeletedList))
	d.Set("entry_count", basicServiceDeletedList.EntryCount)

	return nil
}

func dataSourceBasicServiceDeletedListSchemaMake() map[string]*schema.Schema {
	temp := dataSourceBasicServiceListSchemaMake()

	delete(temp, "by_id")
	delete(temp, "name")
	delete(temp, "rg_name")
	delete(temp, "status")
	delete(temp, "tech_status")
	delete(temp, "account_name")

	return temp
}

func DataSourceBasicServiceDeletedList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceBasicServiceDeletedListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceBasicServiceDeletedListSchemaMake(),
	}
}

@@ -1,304 +1,305 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package bservice

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceBasicServiceGroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	bsg, err := utilityBasicServiceGroupCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("account_id", bsg.AccountID)
	d.Set("account_name", bsg.AccountName)
	d.Set("computes", flattenBSGroupComputes(bsg.Computes))
	d.Set("consistency", bsg.Consistency)
	d.Set("cpu", bsg.CPU)
	d.Set("created_by", bsg.CreatedBy)
	d.Set("created_time", bsg.CreatedTime)
	d.Set("deleted_by", bsg.DeletedBy)
	d.Set("deleted_time", bsg.DeletedTime)
	d.Set("disk", bsg.Disk)
	d.Set("driver", bsg.Driver)
	d.Set("extnets", bsg.ExtNets)
	d.Set("gid", bsg.GID)
	d.Set("guid", bsg.GUID)
	d.Set("image_id", bsg.ImageID)
	d.Set("milestones", bsg.Milestones)
	d.Set("compgroup_name", bsg.Name)
	d.Set("parents", bsg.Parents)
	d.Set("ram", bsg.RAM)
	d.Set("rg_id", bsg.RGID)
	d.Set("rg_name", bsg.RGName)
	d.Set("role", bsg.Role)
	d.Set("sep_id", bsg.SEPID)
	d.Set("seq_no", bsg.SeqNo)
	d.Set("status", bsg.Status)
	d.Set("tech_status", bsg.TechStatus)
	d.Set("timeout_start", bsg.TimeoutStart)
	d.Set("updated_by", bsg.UpdatedBy)
	d.Set("updated_time", bsg.UpdatedTime)
	d.Set("vinses", bsg.VINSes)

	return nil
}

func flattenBSGroupOSUsers(bsgosus bservice.ListOSUsers) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, bsgosu := range bsgosus {
		temp := map[string]interface{}{
			"login":    bsgosu.Login,
			"password": bsgosu.Password,
		}
		res = append(res, temp)
	}

	return res
}

func flattenBSGroupComputes(bsgcs bservice.ListGroupComputes) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, bsgc := range bsgcs {
		temp := map[string]interface{}{
			"id":           bsgc.ID,
			"ip_addresses": bsgc.IPAddresses,
			"name":         bsgc.Name,
			"os_users":     flattenBSGroupOSUsers(bsgc.OSUsers),
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"service_id":   {Type: schema.TypeInt, Required: true},
		"compgroup_id": {Type: schema.TypeInt, Required: true},
		"account_id":   {Type: schema.TypeInt, Computed: true},
		"account_name": {Type: schema.TypeString, Computed: true},
		"computes": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"id": {Type: schema.TypeInt, Computed: true},
					"ip_addresses": {
						Type:     schema.TypeList,
						Computed: true,
						Elem:     &schema.Schema{Type: schema.TypeString},
					},
					"name": {Type: schema.TypeString, Computed: true},
					"os_users": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"login":    {Type: schema.TypeString, Computed: true},
								"password": {Type: schema.TypeString, Computed: true},
							},
						},
					},
				},
			},
		},
		"consistency":  {Type: schema.TypeBool, Computed: true},
		"cpu":          {Type: schema.TypeInt, Computed: true},
		"created_by":   {Type: schema.TypeString, Computed: true},
		"created_time": {Type: schema.TypeInt, Computed: true},
		"deleted_by":   {Type: schema.TypeString, Computed: true},
		"deleted_time": {Type: schema.TypeInt, Computed: true},
		"disk":         {Type: schema.TypeInt, Computed: true},
		"driver":       {Type: schema.TypeString, Computed: true},
		"extnets": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Schema{Type: schema.TypeInt},
		},
		"gid":            {Type: schema.TypeInt, Computed: true},
		"guid":           {Type: schema.TypeInt, Computed: true},
		"image_id":       {Type: schema.TypeInt, Computed: true},
		"milestones":     {Type: schema.TypeInt, Computed: true},
		"compgroup_name": {Type: schema.TypeString, Computed: true},
		"parents": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Schema{Type: schema.TypeInt},
		},
		"ram":           {Type: schema.TypeInt, Computed: true},
		"rg_id":         {Type: schema.TypeInt, Computed: true},
		"rg_name":       {Type: schema.TypeString, Computed: true},
		"role":          {Type: schema.TypeString, Computed: true},
		"sep_id":        {Type: schema.TypeInt, Computed: true},
		"seq_no":        {Type: schema.TypeInt, Computed: true},
		"status":        {Type: schema.TypeString, Computed: true},
		"tech_status":   {Type: schema.TypeString, Computed: true},
		"timeout_start": {Type: schema.TypeInt, Computed: true},
		"updated_by":    {Type: schema.TypeString, Computed: true},
		"updated_time":  {Type: schema.TypeInt, Computed: true},
		"vinses": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Schema{Type: schema.TypeInt},
		},
	}
	return res
}

func DataSourceBasicServiceGroup() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceBasicServiceGroupRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceBasicServiceGroupSchemaMake(),
	}
}

@@ -1,263 +1,264 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package bservice

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func flattenBasicServiceList(bsl *bservice.ListBasicServices) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, bs := range bsl.Data {
		temp := map[string]interface{}{
			"account_id":      bs.AccountID,
			"account_name":    bs.AccountName,
			"base_domain":     bs.BaseDomain,
			"created_by":      bs.CreatedBy,
			"created_time":    bs.CreatedTime,
			"deleted_by":      bs.DeletedBy,
			"deleted_time":    bs.DeletedTime,
			"gid":             bs.GID,
			"groups":          bs.Groups,
			"guid":            bs.GUID,
			"service_id":      bs.ID,
			"service_name":    bs.Name,
			"parent_srv_id":   bs.ParentSrvID,
			"parent_srv_type": bs.ParentSrvType,
			"rg_id":           bs.RGID,
			"rg_name":         bs.RGName,
			"ssh_user":        bs.SSHUser,
			"status":          bs.Status,
			"tech_status":     bs.TechStatus,
			"updated_by":      bs.UpdatedBy,
			"updated_time":    bs.UpdatedTime,
			"user_managed":    bs.UserManaged,
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceBasicServiceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	basicServiceList, err := utilityBasicServiceListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenBasicServiceList(basicServiceList))
	d.Set("entry_count", basicServiceList.EntryCount)

	return nil
}

func dataSourceBasicServiceListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"by_id":        {Type: schema.TypeInt, Optional: true, Description: "Filter by ID"},
		"name":         {Type: schema.TypeString, Optional: true, Description: "Filter by bservice name"},
		"rg_name":      {Type: schema.TypeString, Optional: true, Description: "Filter by resource group name"},
		"status":       {Type: schema.TypeString, Optional: true, Description: "Filter by status"},
		"tech_status":  {Type: schema.TypeString, Optional: true, Description: "Filter by tech status"},
		"account_name": {Type: schema.TypeString, Optional: true, Description: "Filter by account name"},
		"account_id":   {Type: schema.TypeInt, Optional: true, Description: "ID of the account to query for BasicService instances"},
		"rg_id":        {Type: schema.TypeInt, Optional: true, Description: "ID of the resource group to query for BasicService instances"},
		"page":         {Type: schema.TypeInt, Optional: true, Description: "Page number"},
		"size":         {Type: schema.TypeInt, Optional: true, Description: "Page size"},
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"account_id":   {Type: schema.TypeInt, Computed: true},
					"account_name": {Type: schema.TypeString, Computed: true},
					"base_domain":  {Type: schema.TypeString, Computed: true},
					"created_by":   {Type: schema.TypeString, Computed: true},
					"created_time": {Type: schema.TypeInt, Computed: true},
					"deleted_by":   {Type: schema.TypeString, Computed: true},
					"deleted_time": {Type: schema.TypeInt, Computed: true},
					"gid":          {Type: schema.TypeInt, Computed: true},
					"groups": {
						Type:     schema.TypeList,
						Computed: true,
						Elem:     &schema.Schema{Type: schema.TypeInt},
					},
					"guid":            {Type: schema.TypeInt, Computed: true},
					"service_id":      {Type: schema.TypeInt, Computed: true},
					"service_name":    {Type: schema.TypeString, Computed: true},
					"parent_srv_id":   {Type: schema.TypeInt, Computed: true},
					"parent_srv_type": {Type: schema.TypeString, Computed: true},
					"rg_id":           {Type: schema.TypeInt, Computed: true},
					"rg_name":         {Type: schema.TypeString, Computed: true},
					"ssh_user":        {Type: schema.TypeString, Computed: true},
					"status":          {Type: schema.TypeString, Computed: true},
					"tech_status":     {Type: schema.TypeString, Computed: true},
					"updated_by":      {Type: schema.TypeString, Computed: true},
					"updated_time":    {Type: schema.TypeInt, Computed: true},
					"user_managed":    {Type: schema.TypeBool, Computed: true},
				},
			},
		},
		"entry_count": {Type: schema.TypeInt, Computed: true},
	}
	return res
}

func DataSourceBasicServiceList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceBasicServiceListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceBasicServiceListSchemaMake(),
	}
}

@@ -1,105 +1,110 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package bservice

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceBasicServiceSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	basicServiceSnapshotList, err := utilityBasicServiceSnapshotListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenBasicServiceSnapshotsList(basicServiceSnapshotList))
	d.Set("entry_count", basicServiceSnapshotList.EntryCount)
	return nil
}

func dataSourceBasicServiceSnapshotListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"service_id": {Type: schema.TypeInt, Required: true, Description: "ID of the BasicService instance"},
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"guid":      {Type: schema.TypeString, Computed: true},
					"label":     {Type: schema.TypeString, Computed: true},
					"timestamp": {Type: schema.TypeInt, Computed: true},
					"valid":     {Type: schema.TypeBool, Computed: true},
				},
			},
		},
		"entry_count": {Type: schema.TypeInt, Computed: true},
	}
	return res
}

func DataSourceBasicServiceSnapshotList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceBasicServiceSnapshotListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceBasicServiceSnapshotListSchemaMake(),
	}
}

File diff suppressed because it is too large

@@ -47,6 +47,7 @@ import (
func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
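The one-line change above repeats across every data source read function touched by this commit: when the presence check fails, the ID is cleared before the error is returned, so a failed lookup does not leave a stale ID behind. A minimal sketch of the pattern follows; dataSourceExampleRead and utilityExampleCheckPresence are hypothetical names standing in for the per-object functions shown in the diffs.

// Sketch of the read pattern applied throughout this commit (assumed names).
package disks

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func dataSourceExampleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	obj, err := utilityExampleCheckPresence(ctx, d, m) // assumed helper, mirrors utilityDiskCheckPresence
	if err != nil {
		d.SetId("") // clear the ID on a failed read
		return diag.FromErr(err)
	}

	// on success, set the ID from the object that was found
	d.SetId(strconv.FormatUint(obj.ID, 10))
	return nil
}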

@@ -1,499 +1,500 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package disks

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	diskList, err := utilityDiskListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenDiskList(diskList))
	d.Set("entry_count", diskList.EntryCount)

	return nil
}

func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"by_id":         {Type: schema.TypeInt, Optional: true, Description: "Find by ID"},
		"name":          {Type: schema.TypeString, Optional: true, Description: "Find by name"},
		"account_name":  {Type: schema.TypeString, Optional: true, Description: "Find by account name"},
		"disk_max_size": {Type: schema.TypeInt, Optional: true, Description: "Find by max disk size"},
		"status":        {Type: schema.TypeString, Optional: true, Description: "Find by status"},
		"shared":        {Type: schema.TypeBool, Optional: true, Description: "Find by shared field"},
		"account_id":    {Type: schema.TypeInt, Optional: true, Description: "ID of the account the disks belong to"},
		"type":          {Type: schema.TypeString, Optional: true, Description: "type of the disks"},
		"sep_id":        {Type: schema.TypeInt, Optional: true, Description: "find by sep ID"},
		"pool_name":     {Type: schema.TypeString, Optional: true, Description: "find by pool name"},
		"page":          {Type: schema.TypeInt, Optional: true, Description: "Page number"},
		"size":          {Type: schema.TypeInt, Optional: true, Description: "Page size"},
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"account_id":   {Type: schema.TypeInt, Computed: true, Description: "The unique ID of the subscriber-owner of the disk"},
					"account_name": {Type: schema.TypeString, Computed: true, Description: "The name of the subscriber '(account') to whom this disk belongs"},
					"acl":          {Type: schema.TypeString, Computed: true},
					// "boot_partition": {
					// 	Type:        schema.TypeInt,
					// 	Computed:    true,
					// 	Description: "Number of disk partitions",
					// },
					"computes": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"compute_id":   {Type: schema.TypeString, Computed: true},
								"compute_name": {Type: schema.TypeString, Computed: true},
							},
						},
					},
					"created_time":     {Type: schema.TypeInt, Computed: true, Description: "Created time"},
					"deleted_time":     {Type: schema.TypeInt, Computed: true, Description: "Deleted time"},
					"desc":             {Type: schema.TypeString, Computed: true, Description: "Description of disk"},
					"destruction_time": {Type: schema.TypeInt, Computed: true, Description: "Time of final deletion"},
					"devicename":       {Type: schema.TypeString, Computed: true, Description: "Name of the device"},
					// "disk_path": {
					// 	Type:        schema.TypeString,
					// 	Computed:    true,
					// 	Description: "Disk path",
					// },
					"gid": {Type: schema.TypeInt, Computed: true, Description: "ID of the grid (platform)"},
					// "guid": {
					// 	Type:        schema.TypeInt,
					// 	Computed:    true,
					// 	Description: "Disk ID on the storage side",
					// },
					"disk_id":  {Type: schema.TypeInt, Computed: true, Description: "The unique ID of the subscriber-owner of the disk"},
					"image_id": {Type: schema.TypeInt, Computed: true, Description: "Image ID"},
					"images": {
						Type:        schema.TypeList,
						Computed:    true,
						Elem:        &schema.Schema{Type: schema.TypeString},
						Description: "IDs of images using the disk",
					},
					"iotune": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"read_bytes_sec":      {Type: schema.TypeInt, Computed: true, Description: "Number of bytes to read per second"},
								"read_bytes_sec_max":  {Type: schema.TypeInt, Computed: true, Description: "Maximum number of bytes to read"},
								"read_iops_sec":       {Type: schema.TypeInt, Computed: true, Description: "Number of io read operations per second"},
								"read_iops_sec_max":   {Type: schema.TypeInt, Computed: true, Description: "Maximum number of io read operations"},
								"size_iops_sec":       {Type: schema.TypeInt, Computed: true, Description: "Size of io operations"},
								"total_bytes_sec":     {Type: schema.TypeInt, Computed: true, Description: "Total size bytes per second"},
								"total_bytes_sec_max": {Type: schema.TypeInt, Computed: true, Description: "Maximum total size of bytes per second"},
								"total_iops_sec":      {Type: schema.TypeInt, Computed: true, Description: "Total number of io operations per second"},
								"total_iops_sec_max":  {Type: schema.TypeInt, Computed: true, Description: "Maximum total number of io operations per second"},
								"write_bytes_sec":     {Type: schema.TypeInt, Computed: true, Description: "Number of bytes to write per second"},
								"write_bytes_sec_max": {Type: schema.TypeInt, Computed: true, Description: "Maximum number of bytes to write per second"},
								"write_iops_sec":      {Type: schema.TypeInt, Computed: true, Description: "Number of write operations per second"},
								"write_iops_sec_max":  {Type: schema.TypeInt, Computed: true, Description: "Maximum number of write operations per second"},
							},
						},
					},
					// "iqn": {
					// 	Type:        schema.TypeString,
					// 	Computed:    true,
					// 	Description: "Disk IQN",
					// },
					// "login": {
					// 	Type:        schema.TypeString,
					// 	Computed:    true,
					// 	Description: "Login to access the disk",
					// },
					"machine_id":   {Type: schema.TypeInt, Computed: true, Description: "Machine ID"},
					"machine_name": {Type: schema.TypeString, Computed: true, Description: "Machine name"},
					// "milestones": {
					// 	Type:        schema.TypeInt,
					// 	Computed:    true,
					// 	Description: "Milestones",
					// },
					"disk_name": {Type: schema.TypeString, Computed: true, Description: "Name of disk"},
					"order":     {Type: schema.TypeInt, Computed: true, Description: "Disk order"},
					"params":    {Type: schema.TypeString, Computed: true, Description: "Disk params"},
					"parent_id": {Type: schema.TypeInt, Computed: true, Description: "ID of the parent disk"},
					// "passwd": {
					// 	Type:        schema.TypeString,
					// 	Computed:    true,
					// 	Description: "Password to access the disk",
					// },
					"pci_slot": {Type: schema.TypeInt, Computed: true, Description: "ID of the pci slot to which the disk is connected"},
					"pool":     {Type: schema.TypeString, Computed: true, Description: "Pool for disk location"},
					"present_to": {
						Type:     schema.TypeList,
						Computed: true,
						Elem:     &schema.Schema{Type: schema.TypeInt},
					},
					// "purge_attempts": {
					// 	Type:        schema.TypeInt,
					// 	Computed:    true,
					// 	Description: "Number of deletion attempts",
					// },
					"purge_time": {
						Type:        schema.TypeInt,
						Computed:    true,
						Description: "Time of the last deletion attempt",
}, Description: "Time of the last deletion attempt",
// "reality_device_number": { },
// Type: schema.TypeInt, // "reality_device_number": {
// Computed: true, // Type: schema.TypeInt,
// Description: "Reality device number", // Computed: true,
// }, // Description: "Reality device number",
// "reference_id": { // },
// Type: schema.TypeString, // "reference_id": {
// Computed: true, // Type: schema.TypeString,
// Description: "ID of the reference to the disk", // Computed: true,
// }, // Description: "ID of the reference to the disk",
"res_id": { // },
Type: schema.TypeString, "res_id": {
Computed: true, Type: schema.TypeString,
Description: "Resource ID", Computed: true,
}, Description: "Resource ID",
"res_name": { },
Type: schema.TypeString, "res_name": {
Computed: true, Type: schema.TypeString,
Description: "Name of the resource", Computed: true,
}, Description: "Name of the resource",
"role": { },
Type: schema.TypeString, "role": {
Computed: true, Type: schema.TypeString,
Description: "Disk role", Computed: true,
}, Description: "Disk role",
"sep_id": { },
Type: schema.TypeInt, "sep_id": {
Computed: true, Type: schema.TypeInt,
Description: "Storage endpoint provider ID to create disk", Computed: true,
}, Description: "Storage endpoint provider ID to create disk",
"sep_type": { },
Type: schema.TypeString, "sep_type": {
Computed: true, Type: schema.TypeString,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform", Computed: true,
}, Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
"shareable": { },
Type: schema.TypeBool, "shareable": {
Computed: true, Type: schema.TypeBool,
}, Computed: true,
"size_max": { },
Type: schema.TypeInt, "size_max": {
Computed: true, Type: schema.TypeInt,
Description: "Size in GB", Computed: true,
}, Description: "Size in GB",
"size_used": { },
Type: schema.TypeFloat, "size_used": {
Computed: true, Type: schema.TypeFloat,
Description: "Number of used space, in GB", Computed: true,
}, Description: "Number of used space, in GB",
"snapshots": { },
Type: schema.TypeList, "snapshots": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"guid": { Schema: map[string]*schema.Schema{
Type: schema.TypeString, "guid": {
Computed: true, Type: schema.TypeString,
Description: "ID of the snapshot", Computed: true,
}, Description: "ID of the snapshot",
"label": { },
Type: schema.TypeString, "label": {
Computed: true, Type: schema.TypeString,
Description: "Name of the snapshot", Computed: true,
}, Description: "Name of the snapshot",
"res_id": { },
Type: schema.TypeString, "res_id": {
Computed: true, Type: schema.TypeString,
Description: "Reference to the snapshot", Computed: true,
}, Description: "Reference to the snapshot",
"snap_set_guid": { },
Type: schema.TypeString, "snap_set_guid": {
Computed: true, Type: schema.TypeString,
Description: "The set snapshot ID", Computed: true,
}, Description: "The set snapshot ID",
"snap_set_time": { },
Type: schema.TypeInt, "snap_set_time": {
Computed: true, Type: schema.TypeInt,
Description: "The set time of the snapshot", Computed: true,
}, Description: "The set time of the snapshot",
"timestamp": { },
Type: schema.TypeInt, "timestamp": {
Computed: true, Type: schema.TypeInt,
Description: "Snapshot time", Computed: true,
}, Description: "Snapshot time",
}, },
}, },
}, },
"status": { },
Type: schema.TypeString, "status": {
Computed: true, Type: schema.TypeString,
Description: "Disk status", Computed: true,
}, Description: "Disk status",
"tech_status": { },
Type: schema.TypeString, "tech_status": {
Computed: true, Type: schema.TypeString,
Description: "Technical status of the disk", Computed: true,
}, Description: "Technical status of the disk",
"type": { },
Type: schema.TypeString, "type": {
Computed: true, Type: schema.TypeString,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", Computed: true,
}, Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
"vmid": { },
Type: schema.TypeInt, "vmid": {
Computed: true, Type: schema.TypeInt,
Description: "Virtual Machine ID (Deprecated)", Computed: true,
}, Description: "Virtual Machine ID (Deprecated)",
}, },
}, },
}, },
"entry_count": { },
Type: schema.TypeInt, "entry_count": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
} },
return res }
} return res
}
func DataSourceDiskList() *schema.Resource {
return &schema.Resource{ func DataSourceDiskList() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListRead,
ReadContext: dataSourceDiskListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
} Schema: dataSourceDiskListSchemaMake(),
} }
}
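A recurring change in the hunks below is an extra `d.SetId("")` on the error path of the read, create and update handlers. A minimal sketch of the pattern, using a placeholder record type and lookup helper in place of the provider's utility*CheckPresence functions: clearing the ID on a failed lookup tells Terraform the object is gone, so the stale entry is dropped from state instead of leaving a dangling reference.

```go
package example

import (
	"context"
	"errors"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// thing is a stand-in for an SDK record such as a disk item.
type thing struct{ ID uint64 }

// lookupThing is a placeholder for helpers like utilityDiskCheckPresence.
func lookupThing(ctx context.Context, d *schema.ResourceData, m interface{}) (*thing, error) {
	return nil, errors.New("not found")
}

func dataSourceThingRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	t, err := lookupThing(ctx, d, m)
	if err != nil {
		// Clear the ID so a failed read removes the stale entry from state;
		// this is the line the hunks below add to each handler.
		d.SetId("")
		return diag.FromErr(err)
	}
	d.SetId(strconv.FormatUint(t.ID, 10))
	return nil
}
```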

@@ -44,6 +44,7 @@ import (
func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@@ -47,8 +47,8 @@ func flattenDiskListTypesDetailed(tld *disks.ListTypes) []map[string]interface{}
	for _, typeListDetailed := range tld.Data {
		toMap := typeListDetailed.(map[string]interface{})
		temp := map[string]interface{}{
			"pools":    flattenListTypesDetailedPools(toMap["pools"].([]interface{})),
			"sep_id":   toMap["sepId"].(float64),
			"sep_name": toMap["sepName"].(string),
		}
		res = append(res, temp)
@@ -61,9 +61,9 @@ func flattenListTypesDetailedPools(pools []interface{}) []interface{} {
	for _, pool := range pools {
		toMap := pool.(map[string]interface{})
		temp := map[string]interface{}{
			"name":   toMap["name"].(string),
			"system": toMap["system"].(string),
			"types":  toMap["types"].([]interface{}),
		}
		res = append(res, temp)
	}
@@ -74,6 +74,7 @@ func flattenListTypesDetailedPools(pools []interface{}) []interface{} {
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -112,8 +113,8 @@ func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema {
				Description: "Pool name",
			},
			"system": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"types": {
				Type: schema.TypeList,
@@ -132,8 +133,8 @@ func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema {
			Description: "Storage endpoint provider ID to create disk",
		},
		"sep_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
	},
},

@@ -44,6 +44,7 @@ import (
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -375,6 +376,11 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
			Computed:    true,
			Description: "Storage endpoint provider ID to create disk",
		},
		"shareable": {
			Type:        schema.TypeBool,
			Computed:    true,
			Description: "shareable",
		},
		"size_max": {
			Type:     schema.TypeInt,
			Computed: true,

@@ -45,6 +45,7 @@ import (
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@@ -44,6 +44,7 @@ import (
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@@ -45,6 +45,7 @@ import (
func dataSourceDiskListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	diskList, err := utilityDiskListDeletedCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@@ -56,6 +56,7 @@ func flattenDiskListUnattached(ul *disks.ListDisksUnattached) []map[string]inter
		"res_name":  unattachedDisk.ResName,
		"role":      unattachedDisk.Role,
		"sep_id":    unattachedDisk.SEPID,
		"shareable": unattachedDisk.Shareable,
		"size_max":  unattachedDisk.SizeMax,
		"size_used": unattachedDisk.SizeUsed,
		"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),

@@ -95,6 +95,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
	diskId, err := c.CloudAPI().Disks().Create(ctx, req)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -219,6 +220,7 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -377,9 +379,9 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
func resourceDiskSchemaMake() map[string]*schema.Schema {
	rets := map[string]*schema.Schema{
		"account_id": {
			Type:     schema.TypeInt,
			Required: true,
			//ForceNew:    true,
			Description: "The unique ID of the subscriber-owner of the disk",
		},
		"disk_name": {
@@ -393,9 +395,9 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
			Description: "Size in GB",
		},
		"gid": {
			Type:     schema.TypeInt,
			Required: true,
			//ForceNew:    true,
			Description: "ID of the grid (platform)",
		},
		"pool": {

@@ -83,6 +83,7 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@@ -169,15 +170,15 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
	rets := map[string]*schema.Schema{
		"disk_id": {
			Type:     schema.TypeInt,
			Required: true,
			//ForceNew:    true,
			Description: "The unique ID of the subscriber-owner of the disk",
		},
		"label": {
			Type:     schema.TypeString,
			Required: true,
			//ForceNew:    true,
			Description: "Name of the snapshot",
		},
		"rollback": {

@ -1,285 +1,286 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru> Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package extnet package extnet
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	e, err := utilityExtnetCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	d.SetId(strconv.FormatUint(e.ID, 10))
	flattenExtnet(d, e)

	return nil
}
func dataSourceExtnetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{ func dataSourceExtnetSchemaMake() map[string]*schema.Schema {
"net_id": { res := map[string]*schema.Schema{
Type: schema.TypeInt, "net_id": {
Required: true, Type: schema.TypeInt,
}, Required: true,
"ckey": { },
Type: schema.TypeString, "ckey": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"meta": { },
Type: schema.TypeList, "meta": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Schema{ Computed: true,
Type: schema.TypeString, Elem: &schema.Schema{
}, Type: schema.TypeString,
Description: "meta", },
}, Description: "meta",
"check_ips": { },
Type: schema.TypeList, "check_ips": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Schema{ Computed: true,
Type: schema.TypeString, Elem: &schema.Schema{
}, Type: schema.TypeString,
}, },
"default": { },
Type: schema.TypeBool, "default": {
Computed: true, Type: schema.TypeBool,
}, Computed: true,
"default_qos": { },
Type: schema.TypeList, "default_qos": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"e_rate": { Schema: map[string]*schema.Schema{
Type: schema.TypeInt, "e_rate": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"e_burst": { },
Type: schema.TypeInt, "e_burst": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"guid": { },
Type: schema.TypeString, "guid": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"in_burst": { },
Type: schema.TypeInt, "in_burst": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"in_rate": { },
Type: schema.TypeInt, "in_rate": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
}, },
}, },
}, },
"desc": { },
Type: schema.TypeString, "desc": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"dns": { },
Type: schema.TypeList, "dns": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Schema{ Computed: true,
Type: schema.TypeString, Elem: &schema.Schema{
}, Type: schema.TypeString,
}, },
"excluded": { },
Type: schema.TypeList, "excluded": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"client_type": { Schema: map[string]*schema.Schema{
Type: schema.TypeString, "client_type": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"mac": { },
Type: schema.TypeString, "mac": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"ip": { },
Type: schema.TypeString, "ip": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"type": { },
Type: schema.TypeString, "type": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"vm_id": { },
Type: schema.TypeInt, "vm_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
}, },
}, },
}, },
"free_ips": { },
Type: schema.TypeInt, "free_ips": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"gateway": { },
Type: schema.TypeString, "gateway": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"gid": { },
Type: schema.TypeInt, "gid": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"guid": { },
Type: schema.TypeInt, "guid": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"ipcidr": { },
Type: schema.TypeString, "ipcidr": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"milestones": { },
Type: schema.TypeInt, "milestones": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"net_name": { },
Type: schema.TypeString, "net_name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"network": { },
Type: schema.TypeString, "network": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"network_id": { },
Type: schema.TypeInt, "network_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"pre_reservations_num": { },
Type: schema.TypeInt, "pre_reservations_num": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"prefix": { },
Type: schema.TypeInt, "prefix": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"pri_vnf_dev_id": { },
Type: schema.TypeInt, "pri_vnf_dev_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"reservations": { },
Type: schema.TypeList, "reservations": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"client_type": { Schema: map[string]*schema.Schema{
Type: schema.TypeString, "client_type": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"domainname": { },
Type: schema.TypeString, "domainname": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"hostname": { },
Type: schema.TypeString, "hostname": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"desc": { },
Type: schema.TypeString, "desc": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"ip": { },
Type: schema.TypeString, "ip": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"mac": { },
Type: schema.TypeString, "mac": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"type": { },
Type: schema.TypeString, "type": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"vm_id": { },
Type: schema.TypeInt, "vm_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
}, },
}, },
}, },
"shared_with": { },
Type: schema.TypeList, "shared_with": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Schema{ Computed: true,
Type: schema.TypeInt, Elem: &schema.Schema{
}, Type: schema.TypeInt,
}, },
"status": { },
Type: schema.TypeString, "status": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"vlan_id": { },
Type: schema.TypeInt, "vlan_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"vnfs": { },
Type: schema.TypeList, "vnfs": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"dhcp": { Schema: map[string]*schema.Schema{
Type: schema.TypeInt, "dhcp": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
}, },
}, },
}, },
} },
return res }
} return res
}
func DataSourceExtnet() *schema.Resource {
return &schema.Resource{ func DataSourceExtnet() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceExtnetRead,
ReadContext: dataSourceExtnetRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dataSourceExtnetSchemaMake(),
} Schema: dataSourceExtnetSchemaMake(),
} }
}

@ -1,161 +1,162 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package extnet package extnet
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceExtnetComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	extnetComputesList, err := utilityExtnetComputesListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenExtnetComputesList(extnetComputesList))
	d.Set("entry_count", extnetComputesList.EntryCount)

	return nil
}
func dataSourceExtnetComputesListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{ func dataSourceExtnetComputesListSchemaMake() map[string]*schema.Schema {
"account_id": { res := map[string]*schema.Schema{
Type: schema.TypeInt, "account_id": {
Required: true, Type: schema.TypeInt,
Description: "filter by account ID", Required: true,
}, Description: "filter by account ID",
"rg_id": { },
Type: schema.TypeInt, "rg_id": {
Optional: true, Type: schema.TypeInt,
Description: "Filter by RG ID", Optional: true,
}, Description: "Filter by RG ID",
"compute_id": { },
Type: schema.TypeInt, "compute_id": {
Optional: true, Type: schema.TypeInt,
Description: "Filter by compute ID", Optional: true,
}, Description: "Filter by compute ID",
"page": { },
Type: schema.TypeInt, "page": {
Optional: true, Type: schema.TypeInt,
Description: "Page number", Optional: true,
}, Description: "Page number",
"size": { },
Type: schema.TypeInt, "size": {
Optional: true, Type: schema.TypeInt,
Description: "Page size", Optional: true,
}, Description: "Page size",
"items": { },
Type: schema.TypeList, "items": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"account_id": { Schema: map[string]*schema.Schema{
Type: schema.TypeInt, "account_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"account_name": { },
Type: schema.TypeString, "account_name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"extnets": { },
Type: schema.TypeList, "extnets": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"net_id": { Schema: map[string]*schema.Schema{
Type: schema.TypeInt, "net_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"ipaddr": { },
Type: schema.TypeString, "ipaddr": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"ipcidr": { },
Type: schema.TypeString, "ipcidr": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"name": { },
Type: schema.TypeString, "name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
}, },
}, },
}, },
"id": { },
Type: schema.TypeInt, "id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"name": { },
Type: schema.TypeString, "name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"rg_id": { },
Type: schema.TypeInt, "rg_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"rg_name": { },
Type: schema.TypeString, "rg_name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
}, },
}, },
}, },
"entry_count": { },
Type: schema.TypeInt, "entry_count": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
} },
return res }
} return res
}
func DataSourceExtnetComputesList() *schema.Resource {
return &schema.Resource{ func DataSourceExtnetComputesList() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceExtnetComputesListRead,
ReadContext: dataSourceExtnetComputesListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dataSourceExtnetComputesListSchemaMake(),
} Schema: dataSourceExtnetComputesListSchemaMake(),
} }
}

@ -1,85 +1,86 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package extnet package extnet
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceExtnetDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	extnetId, err := utilityExtnetDefaultCheckPresence(ctx, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	extnetIdInt, err := strconv.ParseInt(extnetId, 10, 32)
	if err != nil {
		return diag.FromErr(err)
	}
	d.Set("net_id", extnetIdInt)

	return nil
}
func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{ func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema {
"net_id": { res := map[string]*schema.Schema{
Type: schema.TypeInt, "net_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
} },
return res }
} return res
}
func DataSourceExtnetDefault() *schema.Resource {
return &schema.Resource{ func DataSourceExtnetDefault() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceExtnetDefaultRead,
ReadContext: dataSourceExtnetDefaultRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dataSourceExtnetDefaultSchemaMake(),
} Schema: dataSourceExtnetDefaultSchemaMake(),
} }
}

@ -1,145 +1,150 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package extnet package extnet
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	extnetList, err := utilityExtnetListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenExtnetList(extnetList))
	d.Set("entry_count", extnetList.EntryCount)

	return nil
}
func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{ func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
"account_id": { res := map[string]*schema.Schema{
Type: schema.TypeInt, "account_id": {
Optional: true, Type: schema.TypeInt,
Description: "Find by account ID", Optional: true,
}, Description: "Find by account ID",
"by_id": { },
Type: schema.TypeInt, "by_id": {
Optional: true, Type: schema.TypeInt,
Description: "Find by ID", Optional: true,
}, Description: "Find by ID",
"name": { },
Type: schema.TypeString, "name": {
Optional: true, Type: schema.TypeString,
Description: "Find by name", Optional: true,
}, Description: "Find by name",
"network": { },
Type: schema.TypeString, "network": {
Optional: true, Type: schema.TypeString,
}, Optional: true,
"vlan_id": { },
Type: schema.TypeInt, "vlan_id": {
Optional: true, Type: schema.TypeInt,
Description: "Find by VLAN ID", Optional: true,
}, Description: "Find by VLAN ID",
"vnfdev_id": { },
Type: schema.TypeInt, "vnfdev_id": {
Optional: true, Type: schema.TypeInt,
Description: "Find by VnfDEV ID", Optional: true,
}, Description: "Find by VnfDEV ID",
"status": { },
Type: schema.TypeString, "status": {
Optional: true, Type: schema.TypeString,
Description: "Find by status", Optional: true,
}, Description: "Find by status",
"page": { },
Type: schema.TypeInt, "page": {
Optional: true, Type: schema.TypeInt,
Description: "Page number", Optional: true,
}, Description: "Page number",
"size": { },
Type: schema.TypeInt, "size": {
Optional: true, Type: schema.TypeInt,
Description: "Page size", Optional: true,
}, Description: "Page size",
"items": { },
Type: schema.TypeList, "items": {
Computed: true, Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Elem: &schema.Resource{
"net_id": { Schema: map[string]*schema.Schema{
Type: schema.TypeInt, "net_id": {
Computed: true, Type: schema.TypeInt,
}, Computed: true,
"ipcidr": { },
Type: schema.TypeString, "ipcidr": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"name": { },
Type: schema.TypeString, "name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
}, },
					"status": {
						Type:     schema.TypeString,
						Computed: true,
					},
				},
			},
		},
		"entry_count": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
	return res
}

func DataSourceExtnetList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceExtnetListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceExtnetListSchemaMake(),
	}
}

@@ -92,7 +92,7 @@ func flattenExtnetVNFS(evnfs extnet.VNFs) []map[string]interface{} {
}

func flattenExtnetsComputes(ecs extnet.ListExtNetExtends) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(ecs))
	for _, ec := range ecs {
		temp := map[string]interface{}{
			"net_id": ec.ID,
@@ -129,6 +129,7 @@ func flattenExtnetList(el *extnet.ListExtNets) []map[string]interface{} {
			"net_id": e.ID,
			"ipcidr": e.IPCIDR,
			"name":   e.Name,
			"status": e.Status,
		}
		res = append(res, temp)
	}

@@ -66,6 +66,10 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"account_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"client_ids": {
			Type:     schema.TypeList,
			Computed: true,
@@ -85,10 +89,26 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
			Type:     schema.TypeString,
			Computed: true,
		},
		"created_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"created_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"default_gw": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"deleted_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"deleted_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"desc": {
			Type:     schema.TypeString,
			Computed: true,
@@ -121,18 +141,30 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
			Type:     schema.TypeString,
			Computed: true,
		},
		"network": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"rg_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"rg_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"updated_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"updated_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
}

@@ -185,6 +185,13 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
					Type:     schema.TypeString,
					Computed: true,
				},
				"meta": {
					Type:     schema.TypeList,
					Computed: true,
					Elem: &schema.Schema{
						Type: schema.TypeString,
					},
				},
			},
		},
	},
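The new `meta` attribute is a computed list of plain strings, whereas `items` is a list of nested objects; the SDK distinguishes the two by the type of `Elem`. An illustrative sketch (attribute names are placeholders, not the provider's full schema):

```go
package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// A list of primitives uses Elem: &schema.Schema, a list of objects uses
// Elem: &schema.Resource with its own nested schema map.
var listAttributes = map[string]*schema.Schema{
	"meta": {
		Type:     schema.TypeList,
		Computed: true,
		Elem: &schema.Schema{
			Type: schema.TypeString,
		},
	},
	"items": {
		Type:     schema.TypeList,
		Computed: true,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"name": {
					Type:     schema.TypeString,
					Computed: true,
				},
			},
		},
	},
}
```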

@@ -36,15 +36,21 @@ package flipgroup
import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/flipgroup"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)

func flattenFlipgroup(d *schema.ResourceData, fg *flipgroup.RecordFLIPGroup) {
	d.Set("account_id", fg.AccountID)
	d.Set("account_name", fg.AccountName)
	d.Set("client_ids", fg.ClientIDs)
	d.Set("client_type", fg.ClientType)
	d.Set("conn_id", fg.ConnID)
	d.Set("conn_type", fg.ConnType)
	d.Set("created_by", fg.CreatedBy)
	d.Set("created_time", fg.CreatedTime)
	d.Set("default_gw", fg.DefaultGW)
	d.Set("deleted_by", fg.DeletedBy)
	d.Set("deleted_time", fg.DeletedTime)
	d.Set("desc", fg.Description)
	d.Set("gid", fg.GID)
	d.Set("guid", fg.GUID)
@@ -54,13 +60,20 @@ func flattenFlipgroup(d *schema.ResourceData, fg *flipgroup.RecordFLIPGroup) {
	d.Set("name", fg.Name)
	d.Set("net_id", fg.NetID)
	d.Set("net_type", fg.NetType)
	d.Set("network", fg.Network)
	d.Set("rg_id", fg.RGID)
	d.Set("rg_name", fg.RGName)
	d.Set("status", fg.Status)
	d.Set("updated_by", fg.UpdatedBy)
	d.Set("updated_time", fg.UpdatedTime)
}

func flattenFlipgroupList(fg_list *flipgroup.ListFLIPGroups) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, fg := range fg_list.Data {
		temp := map[string]interface{}{
			"ckey":        fg.CKey,
			"meta":        flattens.FlattenMeta(fg.Meta),
			"account_id":  fg.AccountID,
			"client_ids":  fg.ClientIDs,
			"client_type": fg.ClientType,
@@ -76,6 +89,7 @@ func flattenFlipgroupList(fg_list *flipgroup.ListFLIPGroups) []map[string]interf
			"name":     fg.Name,
			"net_id":   fg.NetID,
			"net_type": fg.NetType,
			"net_mask": fg.NetMask,
			"status":   fg.Status,
		}
		res = append(res, temp)
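`flattens.FlattenMeta` is the provider's helper for turning the API's heterogeneous `meta` array into the list of strings declared in the schema; its exact signature is not shown in this diff, so the following is only a guess at the shape of such a helper:

```go
package example

import "fmt"

// flattenMeta is a guess at what a helper like internal/flattens.FlattenMeta
// does: the API returns "meta" as a mixed []interface{}, while the schema
// declares a list of strings, so each element is stringified. The real
// helper's name, package and signature may differ.
func flattenMeta(meta []interface{}) []string {
	res := make([]string, 0, len(meta))
	for _, v := range meta {
		res = append(res, fmt.Sprint(v))
	}
	return res
}
```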

@ -64,6 +64,7 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
resp, err := c.CloudAPI().FLIPGroup().Create(ctx, req) resp, err := c.CloudAPI().FLIPGroup().Create(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -136,6 +137,7 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
fg, err := utilityFlipgroupCheckPresence(ctx, d, m) fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -148,6 +150,8 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId("")
return nil return nil
} }
@ -205,6 +209,10 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"conn_id": { "conn_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
@ -213,10 +221,26 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": { "default_gw": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"gid": { "gid": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
@ -233,14 +257,26 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"net_mask": { "network": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"ckey": { "rg_name": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
} }
} }

@@ -34,8 +34,8 @@ package image
import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@@ -44,12 +44,11 @@ import (
func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	image, err := utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	d.SetId(strconv.Itoa(int(image.ID)))

	flattenImage(d, image)
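The hunk above replaces the random UUID previously used as the data source ID with an ID derived from the image itself. A small sketch of the difference, under the assumption that stability across refreshes is the goal: a random ID changes on every read, while an ID built from the image ID stays constant for the same image.

```go
package example

import (
	"strconv"

	"github.com/google/uuid"
)

// randomID yields a different value on every refresh, so the data source
// never converges on a stable identifier.
func randomID() string { return uuid.New().String() }

// stableID derives the identifier from the image itself, as the hunk above
// now does, so repeated reads of the same image keep the same ID.
func stableID(imageID uint64) string { return strconv.Itoa(int(imageID)) }
```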

@ -1,159 +1,160 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package image package image
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )

func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    imageList, err := utilityImageListCheckPresence(ctx, d, m)
    if err != nil {
+       d.SetId("")
        return diag.FromErr(err)
    }

    id := uuid.New()
    d.SetId(id.String())
    d.Set("items", flattenImageList(imageList))
    d.Set("entry_count", imageList.EntryCount)

    return nil
}

func dataSourceImageListSchemaMake() map[string]*schema.Schema {
    rets := map[string]*schema.Schema{
        "sep_id": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "Filter by Storage Endpoint ID",
        },
        "by_id": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "Filter by ID",
        },
        "name": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by name",
        },
        "status": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by status",
        },
        "architecture": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by architecture",
        },
        "type_image": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by image type",
        },
        "image_size": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "Filter by image size",
        },
        "sep_name": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by SEP name",
        },
        "pool": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Filter by pool",
        },
        "public": {
            Type:        schema.TypeBool,
            Optional:    true,
            Description: "Find public/private images",
        },
        "hot_resize": {
            Type:        schema.TypeBool,
            Optional:    true,
            Description: "Find hot resizable images",
        },
        "bootable": {
            Type:        schema.TypeBool,
            Optional:    true,
            Description: "Find bootable images",
        },
        "page": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "page number",
        },
        "size": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "page size",
        },
        "items": {
            Type:        schema.TypeList,
            Computed:    true,
            Description: "image list",
            Elem: &schema.Resource{
                Schema: dataSourceImageSchemaMake(),
            },
        },
        "entry_count": {
            Type:     schema.TypeInt,
            Computed: true,
        },
    }

    return rets
}

func DataSourceImageList() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        ReadContext: dataSourceImageListRead,

        Timeouts: &schema.ResourceTimeout{
            Read:    &constants.Timeout30s,
            Default: &constants.Timeout60s,
        },

        Schema: dataSourceImageListSchemaMake(),
    }
}

@ -3,6 +3,7 @@ package image
import ( import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
"strconv"
) )
func flattenHistory(history []image.History) []map[string]interface{} { func flattenHistory(history []image.History) []map[string]interface{} {
@ -23,7 +24,7 @@ func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
d.Set("unc_path", img.UNCPath) d.Set("unc_path", img.UNCPath)
d.Set("ckey", img.CKey) d.Set("ckey", img.CKey)
d.Set("account_id", img.AccountID) d.Set("account_id", img.AccountID)
d.Set("acl", img.ACL) d.Set("acl", FlattenACL(img.ACL))
d.Set("architecture", img.Architecture) d.Set("architecture", img.Architecture)
d.Set("boot_type", img.BootType) d.Set("boot_type", img.BootType)
d.Set("bootable", img.Bootable) d.Set("bootable", img.Bootable)
@ -85,3 +86,18 @@ func flattenImageList(il *image.ListImages) []map[string]interface{} {
} }
return res return res
} }
func FlattenACL(acl interface{}) string {
switch d := acl.(type) {
case string:
return d
case int:
return strconv.Itoa(d)
case int64:
return strconv.FormatInt(d, 10)
case float64:
return strconv.FormatInt(int64(d), 10)
default:
return ""
}
}
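For reference, a standalone sketch of how the new FlattenACL helper normalizes the ACL value before it is written into state; the helper body mirrors the code above, the demo inputs are assumptions:

package main

import (
    "fmt"
    "strconv"
)

// FlattenACL accepts an ACL value that may arrive as a string or as a
// JSON-decoded number and always returns it as a string.
func FlattenACL(acl interface{}) string {
    switch d := acl.(type) {
    case string:
        return d
    case int:
        return strconv.Itoa(d)
    case int64:
        return strconv.FormatInt(d, 10)
    case float64:
        return strconv.FormatInt(int64(d), 10)
    default:
        return ""
    }
}

func main() {
    fmt.Println(FlattenACL("rw"))        // "rw"
    fmt.Println(FlattenACL(float64(42))) // "42" (JSON numbers decode to float64)
    fmt.Println(FlattenACL(nil))         // "" for unsupported types
}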

@ -1,274 +1,276 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package image package image
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
) )
func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
haveGID, err := existGID(ctx, d, m) haveGID, err := existGID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveGID { if !haveGID {
return diag.Errorf("resourceImageCreate: can't create Image because GID %d is not allowed or does not exist", d.Get("gid").(int)) return diag.Errorf("resourceImageCreate: can't create Image because GID %d is not allowed or does not exist", d.Get("gid").(int))
} }
if _, ok := d.GetOk("account_id"); ok { if _, ok := d.GetOk("account_id"); ok {
haveAccountID, err := existAccountID(ctx, d, m) haveAccountID, err := existAccountID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveAccountID { if !haveAccountID {
return diag.Errorf("resourceImageCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) return diag.Errorf("resourceImageCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
} }
} }
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := image.CreateRequest{} req := image.CreateRequest{}
req.Name = d.Get("name").(string) req.Name = d.Get("name").(string)
req.URL = d.Get("url").(string) req.URL = d.Get("url").(string)
req.GID = uint64(d.Get("gid").(int)) req.GID = uint64(d.Get("gid").(int))
req.BootType = d.Get("boot_type").(string) req.BootType = d.Get("boot_type").(string)
req.ImageType = d.Get("type").(string) req.ImageType = d.Get("type").(string)
drivers := []string{} drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) { for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string)) drivers = append(drivers, driver.(string))
} }
req.Drivers = drivers req.Drivers = drivers
if hotresize, ok := d.GetOk("hot_resize"); ok { if hotresize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hotresize.(bool) req.HotResize = hotresize.(bool)
} }
if username, ok := d.GetOk("username"); ok { if username, ok := d.GetOk("username"); ok {
req.Username = username.(string) req.Username = username.(string)
} }
if password, ok := d.GetOk("password"); ok { if password, ok := d.GetOk("password"); ok {
req.Password = password.(string) req.Password = password.(string)
} }
if accountId, ok := d.GetOk("account_id"); ok { if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int)) req.AccountID = uint64(accountId.(int))
} }
if usernameDL, ok := d.GetOk("username_dl"); ok { if usernameDL, ok := d.GetOk("username_dl"); ok {
req.UsernameDL = usernameDL.(string) req.UsernameDL = usernameDL.(string)
} }
if passwordDL, ok := d.GetOk("password_dl"); ok { if passwordDL, ok := d.GetOk("password_dl"); ok {
req.PasswordDL = passwordDL.(string) req.PasswordDL = passwordDL.(string)
} }
if sepId, ok := d.GetOk("sep_id"); ok { if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int)) req.SEPID = uint64(sepId.(int))
} }
if poolName, ok := d.GetOk("pool_name"); ok { if poolName, ok := d.GetOk("pool_name"); ok {
req.Pool = poolName.(string) req.Pool = poolName.(string)
} }
if architecture, ok := d.GetOk("architecture"); ok { if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string) req.Architecture = architecture.(string)
} }

    imageId, err := c.CloudAPI().Image().Create(ctx, req)
    if err != nil {
+       d.SetId("")
        return diag.FromErr(err)
    }

    d.SetId(strconv.FormatUint(imageId, 10))
    d.Set("image_id", imageId)

    _, err = utilityImageCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    return resourceImageRead(ctx, d, m)
}

func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id())

    img, err := utilityImageCheckPresence(ctx, d, m)
    if img == nil {
        d.SetId("")
        return diag.FromErr(err)
    }

    switch img.Status {
    case status.Modeled:
        return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
    case status.Creating:
    case status.Created:
    case status.Destroyed, status.Purged:
        d.SetId("")
        return diag.Errorf("The resource cannot be updated because it has been destroyed")
        // return resourceImageCreate(ctx, d, m)
    }

    flattenImage(d, img)

    return nil
}

func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())

    _, err := utilityImageCheckPresence(ctx, d, m)
    if err != nil {
+       d.SetId("")
        return diag.FromErr(err)
    }

    c := m.(*controller.ControllerCfg)
    req := image.DeleteRequest{
        ImageID: uint64(d.Get("image_id").(int)),
    }

    if permanently, ok := d.GetOk("permanently"); ok {
        req.Permanently = permanently.(bool)
    }

    _, err = c.CloudAPI().Image().Delete(ctx, req)
    if err != nil {
        return diag.FromErr(err)
    }

    d.SetId("")
    return nil
}

func resourceImageRename(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    log.Debugf("resourceImageEditName: called for %s, id: %s", d.Get("name").(string), d.Id())
    c := m.(*controller.ControllerCfg)
    req := image.RenameRequest{
        ImageID: uint64(d.Get("image_id").(int)),
        Name:    d.Get("name").(string),
    }

    _, err := c.CloudAPI().Image().Rename(ctx, req)
    if err != nil {
        return err
    }

    return nil
}

func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceImageUpdate: called for %s, id: %s", d.Get("name").(string), d.Id())

    haveGID, err := existGID(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    if !haveGID {
        return diag.Errorf("resourceImageUpdate: can't update Image because GID %d is not allowed or does not exist", d.Get("gid").(int))
    }

    if _, ok := d.GetOk("account_id"); ok {
        haveAccountID, err := existAccountID(ctx, d, m)
        if err != nil {
            return diag.FromErr(err)
        }

        if !haveAccountID {
            return diag.Errorf("resourceImageUpdate: can't update Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
        }
    }

    image, err := utilityImageCheckPresence(ctx, d, m)
    if image == nil {
        if err != nil {
            return diag.FromErr(err)
        }
        return nil
    }

    switch image.Status {
    case status.Modeled:
        return diag.Errorf("The image is in status: %s, please, contact support for more information", image.Status)
    case status.Creating:
    case status.Created:
    case status.Destroyed, status.Purged:
        d.SetId("")
        return diag.Errorf("The resource cannot be updated because it has been destroyed")
        // return resourceImageCreate(ctx, d, m)
    }

    if d.HasChange("name") {
        err := resourceImageRename(ctx, d, m)
        if err != nil {
            return diag.FromErr(err)
        }
    }

    return resourceImageRead(ctx, d, m)
}

func ResourceImage() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        CreateContext: resourceImageCreate,
        ReadContext:   resourceImageRead,
        UpdateContext: resourceImageUpdate,
        DeleteContext: resourceImageDelete,

        Importer: &schema.ResourceImporter{
            StateContext: schema.ImportStatePassthroughContext,
        },

        Timeouts: &schema.ResourceTimeout{
            Create:  &constants.Timeout600s,
            Read:    &constants.Timeout300s,
            Update:  &constants.Timeout300s,
            Delete:  &constants.Timeout300s,
            Default: &constants.Timeout300s,
        },

        Schema: resourceImageSchemaMake(dataSourceImageExtendSchemaMake()),
    }
}

@ -50,6 +50,7 @@ import (
func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cluster, err := utilityDataK8sCheckPresence(ctx, d, m) cluster, err := utilityDataK8sCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(cluster.ID, 10)) d.SetId(strconv.FormatUint(cluster.ID, 10))
@ -399,6 +400,10 @@ func dataSourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"network_plugin": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -271,6 +271,10 @@ func createK8sListSchema() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"network_plugin": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -47,6 +47,7 @@ func dataSourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interfac
wg, workersComputeList, err := utilityDataK8sWgCheckPresence(ctx, d, m) wg, workersComputeList, err := utilityDataK8sWgCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }

@ -47,6 +47,7 @@ func dataSourceK8sWgCloudInitRead(ctx context.Context, d *schema.ResourceData, m
metaData, err := utilityK8sWgCloudInitCheckPresence(ctx, d, m) metaData, err := utilityK8sWgCloudInitCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }

@ -45,6 +45,7 @@ import (
func dataSourceK8sWgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceK8sWgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
wgList, err := utilityK8sWgListCheckPresence(ctx, d, m) wgList, err := utilityK8sWgListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }

@ -106,7 +106,7 @@ func flattenWorkerComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
} }
func flattenAclList(aclList k8s.ListACL) []map[string]interface{} { func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (aclList)) res := make([]map[string]interface{}, 0, len(aclList))
for _, acl := range aclList { for _, acl := range aclList {
temp := map[string]interface{}{ temp := map[string]interface{}{
"explicit": acl.Explicit, "explicit": acl.Explicit,
@ -134,7 +134,7 @@ func flattenAcl(acl k8s.RecordACL) []map[string]interface{} {
} }
func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface{} { func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (interfaces)) res := make([]map[string]interface{}, 0, len(interfaces))
for _, interfaceCompute := range interfaces { for _, interfaceCompute := range interfaces {
temp := map[string]interface{}{ temp := map[string]interface{}{
"def_gw": interfaceCompute.DefGW, "def_gw": interfaceCompute.DefGW,
@ -147,7 +147,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
} }
func flattenDetailedInfo(detailedInfoList k8s.ListDetailedInfo, computes []compute.RecordCompute) []map[string]interface{} { func flattenDetailedInfo(detailedInfoList k8s.ListDetailedInfo, computes []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (detailedInfoList)) res := make([]map[string]interface{}, 0, len(detailedInfoList))
if computes != nil { if computes != nil {
for i, detailedInfo := range detailedInfoList { for i, detailedInfo := range detailedInfoList {
temp := map[string]interface{}{ temp := map[string]interface{}{
@ -247,6 +247,7 @@ func flattenK8sData(d *schema.ResourceData, cluster k8s.RecordK8S, masters []com
d.Set("workers", flattenK8sGroup(cluster.K8SGroups.Workers, workers)) d.Set("workers", flattenK8sGroup(cluster.K8SGroups.Workers, workers))
d.Set("lb_id", cluster.LBID) d.Set("lb_id", cluster.LBID)
d.Set("name", cluster.Name) d.Set("name", cluster.Name)
d.Set("network_plugin", cluster.NetworkPlugin)
d.Set("rg_id", cluster.RGID) d.Set("rg_id", cluster.RGID)
d.Set("rg_name", cluster.RGName) d.Set("rg_name", cluster.RGName)
d.Set("status", cluster.Status) d.Set("status", cluster.Status)
@ -267,7 +268,7 @@ func flattenServiceAccount(serviceAccount k8s.RecordServiceAccount) []map[string
} }
func flattenWorkersGroup(workersGroups k8s.ListK8SGroups) []map[string]interface{} { func flattenWorkersGroup(workersGroups k8s.ListK8SGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (workersGroups)) res := make([]map[string]interface{}, 0, len(workersGroups))
for _, worker := range workersGroups { for _, worker := range workersGroups {
temp := map[string]interface{}{ temp := map[string]interface{}{
"annotations": worker.Annotations, "annotations": worker.Annotations,
@ -288,11 +289,11 @@ func flattenWorkersGroup(workersGroups k8s.ListK8SGroups) []map[string]interface
} }
func flattenK8sItems(k8sItems *k8s.ListK8SClusters) []map[string]interface{} { func flattenK8sItems(k8sItems *k8s.ListK8SClusters) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (k8sItems.Data)) res := make([]map[string]interface{}, 0, len(k8sItems.Data))
for _, item := range k8sItems.Data { for _, item := range k8sItems.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"account_id": item.AccountID, "account_id": item.AccountID,
"account_name": item.Name, "account_name": item.AccountName,
"acl": item.ACL, "acl": item.ACL,
"bservice_id": item.BServiceID, "bservice_id": item.BServiceID,
"ci_id": item.CIID, "ci_id": item.CIID,
@ -308,6 +309,7 @@ func flattenK8sItems(k8sItems *k8s.ListK8SClusters) []map[string]interface{} {
"lb_id": item.LBID, "lb_id": item.LBID,
"milestones": item.Milestones, "milestones": item.Milestones,
"k8s_name": item.Name, "k8s_name": item.Name,
"network_plugin": item.NetworkPlugin,
"rg_id": item.RGID, "rg_id": item.RGID,
"rg_name": item.RGName, "rg_name": item.RGName,
"service_account": flattenServiceAccount(item.ServiceAccount), "service_account": flattenServiceAccount(item.ServiceAccount),

@ -85,21 +85,21 @@ func mastersSchemaMake() map[string]*schema.Schema {
Optional: true, Optional: true,
} }
masters["cpu"] = &schema.Schema{ masters["cpu"] = &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Node CPU count.", Description: "Node CPU count.",
} }
masters["ram"] = &schema.Schema{ masters["ram"] = &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Node RAM in MB.", Description: "Node RAM in MB.",
} }
masters["disk"] = &schema.Schema{ masters["disk"] = &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Node boot disk size in GB.", Description: "Node boot disk size in GB.",
} }

@ -14,15 +14,15 @@ func resourceK8sCPSchemaV1() *schema.Resource {
Description: "Name of the cluster.", Description: "Name of the cluster.",
}, },
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Resource group ID that this instance belongs to.", Description: "Resource group ID that this instance belongs to.",
}, },
"k8sci_id": { "k8sci_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.", Description: "ID of the k8s catalog item to base this instance on.",
}, },
"network_plugin": { "network_plugin": {
@ -136,10 +136,10 @@ func resourceK8sCPSchemaV1() *schema.Resource {
}, },
//// ////
"extnet_id": { "extnet_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.", Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
}, },
"desc": { "desc": {

@ -211,6 +211,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
resp, err := c.CloudAPI().K8S().Create(ctx, createReq) resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -316,7 +317,7 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
} }
} }
k8sList, err := utilityK8sListCheckPresence(ctx, d, m) k8sList, err := utilityK8sListForResourceCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
@ -515,7 +516,7 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
} }
} }
return nil return resourceK8sRead(ctx, d, m)
} }
func resourceK8sDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -548,15 +549,15 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Description: "Name of the cluster.", Description: "Name of the cluster.",
}, },
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Resource group ID that this instance belongs to.", Description: "Resource group ID that this instance belongs to.",
}, },
"k8sci_id": { "k8sci_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.", Description: "ID of the k8s catalog item to base this instance on.",
}, },
"wg_name": { "wg_name": {
@ -595,7 +596,7 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList, Type: schema.TypeList,
Optional: true, Optional: true,
Computed: true, Computed: true,
ForceNew: true, //ForceNew: true,
MaxItems: 1, MaxItems: 1,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: mastersSchemaMake(), Schema: mastersSchemaMake(),
@ -624,10 +625,10 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Description: "Use only selected ExtNet for infrastructure connections", Description: "Use only selected ExtNet for infrastructure connections",
}, },
"extnet_id": { "extnet_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.", Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
}, },

@ -197,6 +197,7 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
resp, err := c.CloudAPI().K8S().Create(ctx, createReq) resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -317,7 +318,7 @@ func resourceK8sCPRead(ctx context.Context, d *schema.ResourceData, m interface{
} }
} }
k8sList, err := utilityK8sListCheckPresence(ctx, d, m) k8sList, err := utilityK8sListForResourceCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
@ -532,7 +533,7 @@ func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interfac
} }
} }
return nil return resourceK8sCPRead(ctx, d, m)
} }
func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -540,6 +541,7 @@ func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interfac
cluster, err := utilityK8sCheckPresence(ctx, d, m) cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -554,6 +556,8 @@ func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId("")
return nil return nil
} }
@ -565,15 +569,15 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Description: "Name of the cluster.", Description: "Name of the cluster.",
}, },
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Resource group ID that this instance belongs to.", Description: "Resource group ID that this instance belongs to.",
}, },
"k8sci_id": { "k8sci_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.", Description: "ID of the k8s catalog item to base this instance on.",
}, },
"network_plugin": { "network_plugin": {
@ -681,10 +685,10 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
}, },
//// ////
"extnet_id": { "extnet_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
ForceNew: true, //ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.", Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
}, },
"desc": { "desc": {

@ -201,7 +201,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
} }
} }
return nil return resourceK8sWgRead(ctx, d, m)
} }
func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -209,6 +209,7 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
wg, err := utilityK8sWgCheckPresence(ctx, d, m) wg, err := utilityK8sWgCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -223,22 +224,24 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId("")
return nil return nil
} }
func resourceK8sWgSchemaMake() map[string]*schema.Schema { func resourceK8sWgSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{ return map[string]*schema.Schema{
"k8s_id": { "k8s_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "ID of k8s instance.", Description: "ID of k8s instance.",
}, },
"name": { "name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ForceNew: true, //ForceNew: true,
Description: "Name of the worker group.", Description: "Name of the worker group.",
}, },
@ -250,17 +253,17 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
}, },
"cpu": { "cpu": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
ForceNew: true, //ForceNew: true,
Default: 1, Default: 1,
Description: "Worker node CPU count.", Description: "Worker node CPU count.",
}, },
"ram": { "ram": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
ForceNew: true, //ForceNew: true,
Default: 1024, Default: 1024,
Description: "Worker node RAM in MB.", Description: "Worker node RAM in MB.",
}, },

@ -273,7 +273,22 @@ func utilityK8sListCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.Size = uint64(size.(int)) req.Size = uint64(size.(int))
} }
k8sList, err := c.CloudAPI().K8S().List(ctx, req)
if err != nil {
return nil, err
}
return k8sList, nil
}
func utilityK8sListForResourceCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ListK8SClusters, error) {
c := m.(*controller.ControllerCfg)
req := k8s.ListRequest{
IncludeDeleted: false,
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
k8sList, err := c.CloudAPI().K8S().List(ctx, req) k8sList, err := c.CloudAPI().K8S().List(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
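A minimal sketch, with local stand-in types only, of why the separate utilityK8sListForResourceCheckPresence helper exists: resource reads now always query the cluster list with IncludeDeleted set to false and a plain name filter, so a deleted cluster can never be resolved back into state, while the data-source helper keeps honouring whatever paging and filter arguments the user supplied:

package main

import "fmt"

// Local stand-in for k8s.ListRequest from decort-golang-sdk, for illustration only.
type listRequest struct {
    Name           string
    IncludeDeleted bool
}

func main() {
    // Data-source path: request built from user-supplied filters (name, page, size, ...).
    dataSourceReq := listRequest{Name: "demo"}

    // Resource path: the new helper pins IncludeDeleted to false before listing.
    resourceReq := listRequest{Name: "demo", IncludeDeleted: false}

    fmt.Println(dataSourceReq, resourceReq)
}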

@ -54,6 +54,7 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool {
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
compute, err := utilityDataComputeCheckPresence(ctx, d, m) compute, err := utilityDataComputeCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.Itoa(int(compute.ID))) d.SetId(strconv.Itoa(int(compute.ID)))
@ -290,6 +291,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"image_name": {
Type: schema.TypeString,
Computed: true,
},
"images": { "images": {
Type: schema.TypeList, Type: schema.TypeList,
Computed: true, Computed: true,
@ -444,6 +449,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"flip_group_id": { "flip_group_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -44,6 +44,7 @@ import (
func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeAudits, err := utilityComputeAuditsCheckPresence(ctx, d, m) computeAudits, err := utilityComputeAuditsCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -44,6 +44,7 @@ import (
func dataSourceComputeGetAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeGetAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeAudits, err := utilityComputeGetAuditsCheckPresence(ctx, d, m) computeAudits, err := utilityComputeGetAuditsCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -45,12 +45,13 @@ import (
func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m) computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
result := strings.ReplaceAll(string(computeConsoleUrl), "\"", "") result := strings.ReplaceAll(computeConsoleUrl, "\"", "")
result = strings.ReplaceAll(string(result), "\\", "") result = strings.ReplaceAll(result, "\\", "")
d.Set("console_url", result) d.Set("console_url", result)
return nil return nil
} }
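A small runnable illustration (hypothetical URL value) of the console-URL cleanup shown above, which now operates directly on the plain string returned by the SDK, so the redundant string() conversions were dropped:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical raw value as it might come back from the console-URL API call.
    raw := `"https:\/\/cloud.example.com\/console?token=abc"`
    result := strings.ReplaceAll(raw, "\"", "")
    result = strings.ReplaceAll(result, "\\", "")
    fmt.Println(result) // https://cloud.example.com/console?token=abc
}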

@ -44,6 +44,7 @@ import (
func dataSourceComputeGetLogRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeGetLogRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeGetLog, err := utilityComputeGetLogCheckPresence(ctx, d, m) computeGetLog, err := utilityComputeGetLogCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -44,6 +44,7 @@ import (
func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeList, err := utilityDataComputeListCheckPresence(ctx, d, m) computeList, err := utilityDataComputeListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }

@ -44,6 +44,7 @@ import (
func dataSourceComputeListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeList, err := utilityDataComputeListDeletedCheckPresence(ctx, d, m) computeList, err := utilityDataComputeListDeletedCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }

@ -44,6 +44,7 @@ import (
func dataSourceComputePCIDeviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputePCIDeviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computePCIDeviceList, err := utilityComputePCIDeviceListCheckPresence(ctx, d, m) computePCIDeviceList, err := utilityComputePCIDeviceListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -80,13 +81,13 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
Description: "Find by status", Description: "Find by status",
}, },
"page": { "page": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Description: "Page number", Description: "Page number",
}, },
"size": { "size": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Description: "Page size", Description: "Page size",
}, },
"items": { "items": {

@ -44,6 +44,7 @@ import (
func dataSourceComputePfwListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputePfwListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computePfwList, err := utilityComputePfwListCheckPresence(ctx, d, m) computePfwList, err := utilityComputePfwListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -12,6 +12,7 @@ import (
func dataSourceComputeSnapshotUsageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeSnapshotUsageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeSnapshotUsage, err := utilityComputeSnapshotUsageCheckPresence(ctx, d, m) computeSnapshotUsage, err := utilityComputeSnapshotUsageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -44,6 +44,7 @@ import (
func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeUserList, err := utilityComputeUserListCheckPresence(ctx, d, m) computeUserList, err := utilityComputeUserListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()

@ -44,6 +44,7 @@ import (
func dataSourceComputeVGPUListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeVGPUListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeVGPUList, err := utilityComputeVGPUListCheckPresence(ctx, d, m) computeVGPUList, err := utilityComputeVGPUListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -75,18 +76,18 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
Description: "Find by status", Description: "Find by status",
}, },
"includedeleted": { "includedeleted": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Description: "Include deleted computes. If using field 'status', then includedeleted will be ignored", Description: "Include deleted computes. If using field 'status', then includedeleted will be ignored",
}, },
"page": { "page": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Description: "Page number", Description: "Page number",
}, },
"size": { "size": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Description: "Page size", Description: "Page size",
}, },
"items": { "items": {
@ -94,7 +95,6 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Elem: &schema.Schema{ Elem: &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
}, },
}, },
"entry_count": { "entry_count": {

@ -72,6 +72,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
"conn_id": interfaceItem.ConnID, "conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType, "conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW, "def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID, "flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID, "guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress, "ip_address": interfaceItem.IPAddress,
@ -362,7 +363,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("stateless_sep_id", computeRec.StatelessSepID) d.Set("stateless_sep_id", computeRec.StatelessSepID)
d.Set("stateless_sep_type", computeRec.StatelessSepType) d.Set("stateless_sep_type", computeRec.StatelessSepType)
d.Set("status", computeRec.Status) d.Set("status", computeRec.Status)
d.Set("tags", flattenTags(computeRec.Tags)) // d.Set("tags", flattenTags(computeRec.Tags))
d.Set("tech_status", computeRec.TechStatus) d.Set("tech_status", computeRec.TechStatus)
d.Set("updated_by", computeRec.UpdatedBy) d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime) d.Set("updated_time", computeRec.UpdatedTime)
@ -554,6 +555,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("guid", computeRec.GUID) d.Set("guid", computeRec.GUID)
d.Set("compute_id", computeRec.ID) d.Set("compute_id", computeRec.ID)
d.Set("image_id", computeRec.ImageID) d.Set("image_id", computeRec.ImageID)
d.Set("image_name", computeRec.ImageName)
d.Set("interfaces", flattenInterfaces(computeRec.Interfaces)) d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
d.Set("lock_status", computeRec.LockStatus) d.Set("lock_status", computeRec.LockStatus)
d.Set("manager_id", computeRec.ManagerID) d.Set("manager_id", computeRec.ManagerID)
@ -653,19 +655,19 @@ func flattenSnapshotUsage(computeSnapshotUsages compute.ListUsageSnapshots) []ma
return res return res
} }
func flattenSnapshotList(computeSnapshotUsages *compute.ListSnapShots) []map[string]interface{} { // func flattenSnapshotList(computeSnapshotUsages *compute.ListSnapShots) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeSnapshotUsages.Data)) // res := make([]map[string]interface{}, 0, len(computeSnapshotUsages.Data))
for _, computeUsage := range computeSnapshotUsages.Data { // for _, computeUsage := range computeSnapshotUsages.Data {
temp := map[string]interface{}{ // temp := map[string]interface{}{
"disks": computeUsage.Disks, // "disks": computeUsage.Disks,
"guid": computeUsage.GUID, // "guid": computeUsage.GUID,
"label": computeUsage.Label, // "label": computeUsage.Label,
"timestamp": computeUsage.Timestamp, // "timestamp": computeUsage.Timestamp,
} // }
res = append(res, temp) // res = append(res, temp)
} // }
return res // return res
} // }
func flattenVGPU(m []interface{}) []string { func flattenVGPU(m []interface{}) []string {
output := []string{} output := []string{}
@ -703,4 +705,4 @@ func flattenPCIDevice(m []interface{}) []string {
} }
} }
return output return output
} }

@ -155,6 +155,32 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
if networks.(*schema.Set).Len() > 0 {
ns := networks.(*schema.Set).List()
interfaces := make([]kvmppc.Interface, 0)
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
reqInterface := kvmppc.Interface{
NetType: netInterfaceVal["net_type"].(string),
NetID: uint64(netInterfaceVal["net_id"].(int)),
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
}
interfaces = append(interfaces, reqInterface)
}
createReqPPC.Interfaces = interfaces
}
}
argVal, ok = d.GetOk("cloud_init") argVal, ok = d.GetOk("cloud_init")
if ok { if ok {
userdata := argVal.(string) userdata := argVal.(string)
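Below is a self-contained sketch, with a stand-in Interface type and an assumed net_type value, of how one "network" block is mapped onto the interface entries that the create request above now carries:

package main

import "fmt"

// Illustrative stand-in for kvmppc.Interface from decort-golang-sdk; the field
// names mirror the ones used in the creation request above.
type Interface struct {
    NetType string
    NetID   uint64
    IPAddr  string
}

func main() {
    // One "network" block from the Terraform configuration, as the provider sees it.
    netBlock := map[string]interface{}{
        "net_type":   "VINS", // assumed example value
        "net_id":     1234,
        "ip_address": "192.168.5.10",
    }

    iface := Interface{
        NetType: netBlock["net_type"].(string),
        NetID:   uint64(netBlock["net_id"].(int)),
    }
    if ip, ok := netBlock["ip_address"]; ok {
        iface.IPAddr = ip.(string)
    }
    fmt.Printf("%+v\n", iface) // {NetType:VINS NetID:1234 IPAddr:192.168.5.10}
}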
@@ -242,33 +268,79 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
    }
    }

-   // Note bene: we created compute in a STOPPED state (this is required to properly attach 1st network interface),
-   // now we need to start it before we report the sequence complete
-   if d.Get("started").(bool) {
-       req := compute.StartRequest{ComputeID: computeId}
-       log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
-       if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
-           warnings.Add(err)
-       }
-   }
-
-   if enabled, ok := d.GetOk("enabled"); ok {
-       if enabled.(bool) {
-           req := compute.EnableRequest{ComputeID: computeId}
-           log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
-           if _, err := c.CloudAPI().Compute().Enable(ctx, req); err != nil {
-               warnings.Add(err)
-           }
-       } else {
-           req := compute.DisableRequest{ComputeID: computeId}
-           log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
-           if _, err := c.CloudAPI().Compute().Disable(ctx, req); err != nil {
-               warnings.Add(err)
-           }
-       }
-   }
-
+   if disks, ok := d.GetOk("disks"); ok {
+       log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
+       addedDisks := disks.([]interface{})
+       if len(addedDisks) > 0 {
+           for _, disk := range addedDisks {
+               diskConv := disk.(map[string]interface{})
+               req := compute.DiskAddRequest{
+                   ComputeID: computeId,
+                   DiskName:  diskConv["disk_name"].(string),
+                   Size:      uint64(diskConv["size"].(int)),
+               }
+               if diskConv["sep_id"].(int) != 0 {
+                   req.SepID = uint64(diskConv["sep_id"].(int))
+               }
+               if diskConv["disk_type"].(string) != "" {
+                   req.DiskType = diskConv["disk_type"].(string)
+               }
+               if diskConv["pool"].(string) != "" {
+                   req.Pool = diskConv["pool"].(string)
+               }
+               if diskConv["desc"].(string) != "" {
+                   req.Description = diskConv["desc"].(string)
+               }
+               if diskConv["image_id"].(int) != 0 {
+                   req.ImageID = uint64(diskConv["image_id"].(int))
+               }
+               _, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
+               if err != nil {
+                   cleanup = true
+                   return diag.FromErr(err)
+               }
+           }
+       }
+   }
+
+   if !cleanup {
+       if enabled, ok := d.GetOk("enabled"); ok {
+           if enabled.(bool) {
+               req := compute.EnableRequest{ComputeID: computeId}
+               log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
+               if _, err := c.CloudAPI().Compute().Enable(ctx, req); err != nil {
+                   warnings.Add(err)
+               }
+           } else {
+               req := compute.DisableRequest{ComputeID: computeId}
+               log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
+               if _, err := c.CloudAPI().Compute().Disable(ctx, req); err != nil {
+                   warnings.Add(err)
+               }
+           }
+       }
+
+       // Note bene: we created compute in a STOPPED state (this is required to properly attach 1st network interface),
+       // now we need to start it before we report the sequence complete
+       if start, ok := d.GetOk("started"); ok {
+           if start.(bool) {
+               req := compute.StartRequest{ComputeID: computeId}
+               log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
+               if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
+                   warnings.Add(err)
+               }
+           }
+           if !start.(bool) {
+               req := compute.StopRequest{ComputeID: computeId}
+               log.Debugf("resourceComputeCreate: stoping Compute ID %d after completing its resource configuration", computeId)
+               if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
+                   warnings.Add(err)
+               }
+           }
+       }
+   }

    if !cleanup {
        if affinityLabel, ok := d.GetOk("affinity_label"); ok {
            req := compute.AffinityLabelSetRequest{
                ComputeID: computeId,
@ -281,41 +353,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
if disks, ok := d.GetOk("disks"); ok {
log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
addedDisks := disks.([]interface{})
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
req := compute.DiskAddRequest{
ComputeID: computeId,
DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)),
SepID: uint64(diskConv["sep_id"].(int)),
}
if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string)
}
if diskConv["pool"].(string) != "" {
req.Pool = diskConv["pool"].(string)
}
if diskConv["desc"].(string) != "" {
req.Description = diskConv["desc"].(string)
}
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
}
}
}
if ars, ok := d.GetOk("affinity_rules"); ok { if ars, ok := d.GetOk("affinity_rules"); ok {
log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", computeId) log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", computeId)
addedAR := ars.([]interface{}) addedAR := ars.([]interface{})
@ -361,133 +398,134 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
} }
}
if tags, ok := d.GetOk("tags"); ok { if tags, ok := d.GetOk("tags"); ok {
log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId) log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId)
addedTags := tags.(*schema.Set).List() addedTags := tags.(*schema.Set).List()
if len(addedTags) > 0 { if len(addedTags) > 0 {
for _, tagInterface := range addedTags { for _, tagInterface := range addedTags {
tagItem := tagInterface.(map[string]interface{}) tagItem := tagInterface.(map[string]interface{})
req := compute.TagAddRequest{ req := compute.TagAddRequest{
ComputeID: computeId, ComputeID: computeId,
Key: tagItem["key"].(string), Key: tagItem["key"].(string),
Value: tagItem["value"].(string), Value: tagItem["value"].(string),
} }
_, err := c.CloudAPI().Compute().TagAdd(ctx, req) _, err := c.CloudAPI().Compute().TagAdd(ctx, req)
if err != nil { if err != nil {
warnings.Add(err) warnings.Add(err)
}
} }
} }
} }
}
if pfws, ok := d.GetOk("port_forwarding"); ok { if pfws, ok := d.GetOk("port_forwarding"); ok {
log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId) log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId)
addedPfws := pfws.(*schema.Set).List() addedPfws := pfws.(*schema.Set).List()
if len(addedPfws) > 0 { if len(addedPfws) > 0 {
for _, pfwInterface := range addedPfws { for _, pfwInterface := range addedPfws {
pfwItem := pfwInterface.(map[string]interface{}) pfwItem := pfwInterface.(map[string]interface{})
req := compute.PFWAddRequest{ req := compute.PFWAddRequest{
ComputeID: computeId, ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)), PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
PublicPortEnd: int64(pfwItem["public_port_end"].(int)), LocalBasePort: uint64(pfwItem["local_port"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)), Proto: pfwItem["proto"].(string),
Proto: pfwItem["proto"].(string), }
} if pfwItem["public_port_end"].(int) != 0 {
req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
}
_, err := c.CloudAPI().Compute().PFWAdd(ctx, req) _, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
if err != nil { if err != nil {
warnings.Add(err) warnings.Add(err)
}
} }
} }
} }
}
if userAcess, ok := d.GetOk("user_access"); ok { if userAcess, ok := d.GetOk("user_access"); ok {
log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId) log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId)
usersAcess := userAcess.(*schema.Set).List() usersAcess := userAcess.(*schema.Set).List()
if len(usersAcess) > 0 { if len(usersAcess) > 0 {
for _, userAcessInterface := range usersAcess { for _, userAcessInterface := range usersAcess {
userAccessItem := userAcessInterface.(map[string]interface{}) userAccessItem := userAcessInterface.(map[string]interface{})
req := compute.UserGrantRequest{ req := compute.UserGrantRequest{
ComputeID: computeId, ComputeID: computeId,
Username: userAccessItem["username"].(string), Username: userAccessItem["username"].(string),
AccessType: userAccessItem["access_type"].(string), AccessType: userAccessItem["access_type"].(string),
}
_, err := c.CloudAPI().Compute().UserGrant(ctx, req)
if err != nil {
warnings.Add(err)
}
} }
}
}
_, err := c.CloudAPI().Compute().UserGrant(ctx, req) if snapshotList, ok := d.GetOk("snapshot"); ok {
if err != nil { log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId)
warnings.Add(err) snapshots := snapshotList.(*schema.Set).List()
if len(snapshots) > 0 {
for _, snapshotInterface := range snapshots {
snapshotItem := snapshotInterface.(map[string]interface{})
req := compute.SnapshotCreateRequest{
ComputeID: computeId,
Label: snapshotItem["label"].(string),
}
_, err := c.CloudAPI().Compute().SnapshotCreate(ctx, req)
if err != nil {
warnings.Add(err)
}
} }
} }
} }
}
if snapshotList, ok := d.GetOk("snapshot"); ok { if cdtList, ok := d.GetOk("cd"); ok {
log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId) log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId)
snapshots := snapshotList.(*schema.Set).List() cds := cdtList.(*schema.Set).List()
if len(snapshots) > 0 { if len(cds) > 0 {
for _, snapshotInterface := range snapshots { snapshotItem := cds[0].(map[string]interface{})
snapshotItem := snapshotInterface.(map[string]interface{}) req := compute.CDInsertRequest{
req := compute.SnapshotCreateRequest{
ComputeID: computeId, ComputeID: computeId,
Label: snapshotItem["label"].(string), CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
} }
_, err := c.CloudAPI().Compute().SnapshotCreate(ctx, req) _, err := c.CloudAPI().Compute().CDInsert(ctx, req)
if err != nil { if err != nil {
warnings.Add(err) warnings.Add(err)
} }
} }
} }
}
if cdtList, ok := d.GetOk("cd"); ok { if d.Get("pin_to_stack").(bool) {
log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId) req := compute.PinToStackRequest{
cds := cdtList.(*schema.Set).List()
if len(cds) > 0 {
snapshotItem := cds[0].(map[string]interface{})
req := compute.CDInsertRequest{
ComputeID: computeId, ComputeID: computeId,
CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
} }
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
_, err := c.CloudAPI().Compute().CDInsert(ctx, req)
if err != nil { if err != nil {
warnings.Add(err) warnings.Add(err)
} }
} }
}
if d.Get("pin_to_stack").(bool) == true {
req := compute.PinToStackRequest{
ComputeID: computeId,
}
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
if err != nil {
warnings.Add(err)
}
}
if d.Get("pause").(bool) == true { if d.Get("pause").(bool) {
req := compute.PauseRequest{ req := compute.PauseRequest{
ComputeID: computeId, ComputeID: computeId,
} }
_, err := c.CloudAPI().Compute().Pause(ctx, req) _, err := c.CloudAPI().Compute().Pause(ctx, req)
if err != nil { if err != nil {
warnings.Add(err) warnings.Add(err)
}
} }
} }
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string)) log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity // We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas // between Compute resource and Compute data source schemas
// Compute read function will also update resource ID on success, so that Terraform // Compute read function will also update resource ID on success, so that Terraform
// will know the resource exists // will know the resource exists
defer resourceComputeRead(ctx, d, m) return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)
return warnings.Get()
} }
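// A minimal sketch of the warnings collector used above (warnings.Add / warnings.Get), assuming it
// lives in the provider's internal/dc package and simply accumulates non-fatal errors as Terraform
// warning diagnostics; the exact implementation may differ:
//
//	type Warnings struct {
//		diags diag.Diagnostics
//	}
//
//	func (w *Warnings) Add(err error) {
//		if err != nil {
//			w.diags = append(w.diags, diag.Diagnostic{
//				Severity: diag.Warning,
//				Summary:  err.Error(),
//			})
//		}
//	}
//
//	func (w *Warnings) Get() diag.Diagnostics {
//		return w.diags
//	}
//
// With a collector of this shape, `return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)`
// surfaces every non-fatal create-time failure as a warning while still refreshing state via the read.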
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -498,6 +536,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
computeRec, err := utilityComputeCheckPresence(ctx, d, m) computeRec, err := utilityComputeCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -601,6 +640,76 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err) return diag.FromErr(err)
} }
hasChanged := false
// check compute statuses
switch computeRec.Status {
case status.Deleted:
if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID}
_, err := c.CloudAPI().Compute().Restore(ctx, restoreReq)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
enableReq := compute.EnableRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
}
if !enabled.(bool) {
enableReq := compute.DisableRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Disable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
}
}
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StartRequest{ComputeID: computeRec.ID}
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
return diag.FromErr(err)
}
}
if !start.(bool) {
req := compute.StopRequest{ComputeID: computeRec.ID}
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
return diag.FromErr(err)
}
}
}
hasChanged = true
case status.Destroyed:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceComputeCreate(ctx, d, m)
case status.Disabled:
log.Debugf("The compute is in status: %s, may troubles can be occured with update. Please, enable compute first.", computeRec.Status)
case status.Redeploying:
case status.Deleting:
case status.Destroying:
return diag.Errorf("The compute is in progress with status: %s", computeRec.Status)
case status.Modeled:
return diag.Errorf("The compute is in status: %s, please, contant the support for more information", computeRec.Status)
}
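// Note on the switch above: in Go a case with an empty body (status.Redeploying, status.Deleting here)
// is a deliberate no-op, since there is no implicit fallthrough to the next case. A tiny illustrative
// fragment, not taken from this provider:
//
//	switch s {
//	case "creating", "created":
//		// nothing to do, the update may proceed
//	case "destroying":
//		return fmt.Errorf("still in progress: %s", s)
//	}
//
// So listing a status with no statements simply means "allow the update to continue" for that status.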
if hasChanged {
computeRec, err = utilityComputeCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled") { if d.HasChange("enabled") {
enabled := d.Get("enabled").(bool) enabled := d.Get("enabled").(bool)
if enabled { if enabled {
@ -623,34 +732,22 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled) log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled)
} }
// check compute statuses if d.HasChange("started") {
switch computeRec.Status { start := d.Get("started").(bool)
case status.Deleted: if start {
restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} req := compute.StartRequest{ComputeID: computeRec.ID}
enableReq := compute.EnableRequest{ComputeID: computeRec.ID}
_, err := c.CloudAPI().Compute().Restore(ctx, restoreReq) if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
} }
if !start {
req := compute.StopRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Enable(ctx, enableReq) if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
} }
case status.Destroyed:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceComputeCreate(ctx, d, m)
case status.Disabled:
log.Debugf("The compute is in status: %s, may troubles can be occured with update. Please, enable compute first.", computeRec.Status)
case status.Redeploying:
case status.Deleting:
case status.Destroying:
return diag.Errorf("The compute is in progress with status: %s", computeRec.Status)
case status.Modeled:
return diag.Errorf("The compute is in status: %s, please, contant the support for more information", computeRec.Status)
} }
doUpdate := false doUpdate := false
@ -729,14 +826,16 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
if d.HasChange("description") || d.HasChange("name") { if d.HasChanges("description", "name") {
req := compute.UpdateRequest{ req := compute.UpdateRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
Name: d.Get("name").(string),
} }
if desc, ok := d.GetOk("description"); ok { if d.HasChange("name") {
req.Description = desc.(string) req.Name = d.Get("name").(string)
}
if d.HasChange("description") {
req.Description = d.Get("description").(string)
} }
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil { if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
@ -835,9 +934,11 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
DiskName: diskConv["disk_name"].(string), DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)), Size: uint64(diskConv["size"].(int)),
SepID: uint64(diskConv["sep_id"].(int)),
} }
if diskConv["sep_id"].(int) != 0 {
req.SepID = uint64(diskConv["sep_id"].(int))
}
if diskConv["disk_type"].(string) != "" { if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string) req.DiskType = diskConv["disk_type"].(string)
} }
@ -1267,7 +1368,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("pin_to_stack") { if d.HasChange("pin_to_stack") {
oldPin, newPin := d.GetChange("pin_to_stack") oldPin, newPin := d.GetChange("pin_to_stack")
if oldPin.(bool) == true && newPin.(bool) == false { if !newPin.(bool) {
req := compute.UnpinFromStackRequest{ req := compute.UnpinFromStackRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
} }
@ -1277,7 +1378,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if oldPin.(bool) == false && newPin.(bool) == true { if !oldPin.(bool) {
req := compute.PinToStackRequest{ req := compute.PinToStackRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
} }
@ -1291,7 +1392,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("pause") { if d.HasChange("pause") {
oldPause, newPause := d.GetChange("pause") oldPause, newPause := d.GetChange("pause")
if oldPause.(bool) == true && newPause.(bool) == false { if !newPause.(bool) {
req := compute.ResumeRequest{ req := compute.ResumeRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
} }
@ -1300,7 +1401,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if oldPause.(bool) == false && newPause.(bool) == true { if !oldPause.(bool) {
req := compute.PauseRequest{ req := compute.PauseRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
} }
@ -1313,8 +1414,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
} }
if d.HasChange("reset") { if d.HasChange("reset") {
oldReset, newReset := d.GetChange("reset") _, newReset := d.GetChange("reset")
if oldReset.(bool) == false && newReset.(bool) == true { if newReset.(bool) {
req := compute.ResetRequest{ req := compute.ResetRequest{
ComputeID: computeRec.ID, ComputeID: computeRec.ID,
} }
@ -1394,8 +1495,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
// we may reuse dataSourceComputeRead here as we maintain similarity // we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas // between Compute resource and Compute data source schemas
defer resourceComputeRead(ctx, d, m)
return warnings.Get() return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)
} }
func isChangeDisk(els []interface{}, el interface{}) bool { func isChangeDisk(els []interface{}, el interface{}) bool {
@ -1456,6 +1557,8 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId("")
return nil return nil
} }
@ -1504,7 +1607,6 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
}, },
"permanently": { "permanently": {
Type: schema.TypeBool, Type: schema.TypeBool,
Computed: true,
Optional: true, Optional: true,
Description: "Disk deletion status", Description: "Disk deletion status",
}, },
@ -1877,6 +1979,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Default: false, Default: false,
}, },
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
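// Note on the new "restore" flag (and the other boolean flags with Default: false): helper/schema's
// d.GetOk reports ok == false for a type's zero value, so callers only see ok == true when the
// practitioner explicitly set the flag to true. A hedged sketch of the consuming side under that
// assumption:
//
//	if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
//		// only reached when restore = true was set in the configuration
//	}
//
// The extra restore.(bool) check is therefore belt-and-braces rather than strictly required.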
"auto_start": { "auto_start": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,

@ -34,18 +34,17 @@ package kvmvm
import ( import (
"context" "context"
"regexp"
"strconv"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"regexp"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
func matchComputes(computeList *compute.ListComputes) *compute.ListComputes { func matchComputes(computeList *compute.ListComputes) *compute.ListComputes {
matched, _ := regexp.Compile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+") matched, _ := regexp.Compile(`[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+`)
result := computeList.FilterFunc(func(ic compute.ItemCompute) bool { result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
res := matched.Match([]byte(ic.Name)) res := matched.Match([]byte(ic.Name))
return !res return !res
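// Note on the regexp change above: in a Go raw string literal (backquotes) a backslash is taken
// literally, so `\d` is already the digit class and must not be doubled; a doubled "\\d" in a raw
// string would match a literal backslash followed by the letter d. A small self-contained example:
//
//	re := regexp.MustCompile(`[a-zA-Z]+\d+-[a-zA-Z]+\d+-[a-zA-Z]+\d+`)
//	fmt.Println(re.MatchString("node1-worker2-disk3")) // true
//	fmt.Println(re.MatchString("standalone-name"))     // false
//
// The interpreted-string form "...\\d+..." and the raw-string form with single backslashes compile
// to the same pattern.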
@ -176,17 +175,6 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
old_set, new_set := d.GetChange("network") old_set, new_set := d.GetChange("network")
req := compute.StopRequest{
ComputeID: computeID,
Force: true,
}
log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID)
_, err := c.CloudAPI().Compute().Stop(ctx, req)
if err != nil {
return err
}
apiErrCount := 0 apiErrCount := 0
var lastSavedError error var lastSavedError error
@ -246,6 +234,17 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
} }
} }
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || old_set.(*schema.Set).Len() < 1 {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if err := utilityComputeStop(ctx, computeId, m); err != nil {
apiErrCount++
lastSavedError = err
}
needStart = true
}
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set)) attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id()) log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, runner := range attach_set.List() { for _, runner := range attach_set.List() {
@ -270,13 +269,12 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
} }
} }
startReq := compute.StartRequest{ComputeID: computeID} if needStart {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
log.Debugf("utilityComputeNetworksConfigure: starting compute %d", computeID) if numErr, err := utilityComputeStart(ctx, computeId, m); err != nil {
_, err = c.CloudAPI().Compute().Start(ctx, startReq) apiErrCount += numErr
if err != nil { lastSavedError = err
apiErrCount++ }
lastSavedError = err
} }
if apiErrCount > 0 { if apiErrCount > 0 {
@ -302,3 +300,30 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
return *computeRecord, nil return *computeRecord, nil
} }
func utilityComputeStop(ctx context.Context, computeID uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := compute.StopRequest{
ComputeID: computeID,
Force: true,
}
log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID)
_, err := c.CloudAPI().Compute().Stop(ctx, req)
if err != nil {
return err
}
return nil
}
func utilityComputeStart(ctx context.Context, computeID uint64, m interface{}) (int, error) {
c := m.(*controller.ControllerCfg)
startReq := compute.StartRequest{ComputeID: computeID}
log.Debugf("utilityComputeNetworksConfigure: starting compute %d", computeID)
_, err := c.CloudAPI().Compute().Start(ctx, startReq)
if err != nil {
return 1, err
}
return 0, nil
}
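// A hedged sketch of how these two helpers can be combined when a change requires the compute to be
// power-cycled; the wrapper name and callback are illustrative only, not part of this provider:
//
//	func reconfigureWithPowerCycle(ctx context.Context, computeID uint64, m interface{}, apply func() error) error {
//		if err := utilityComputeStop(ctx, computeID, m); err != nil {
//			return err
//		}
//		if err := apply(); err != nil {
//			return err
//		}
//		_, err := utilityComputeStart(ctx, computeID, m)
//		return err
//	}
//
// utilityComputeStart returns the number of failed API calls alongside the error so callers can fold
// it into their apiErrCount accounting, as utilityComputeNetworksConfigure does above.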

@ -1,70 +1,71 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lb, err := utilityLBCheckPresence(ctx, d, m) lb, err := utilityLBCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(lb.ID, 10))
d.SetId(strconv.FormatUint(lb.ID, 10))
flattenLB(d, lb)
flattenLB(d, lb)
return nil
} return nil
}
func DataSourceLB() *schema.Resource {
return &schema.Resource{ func DataSourceLB() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceLBRead,
ReadContext: dataSourceLBRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dsLBSchemaMake(),
} Schema: dsLBSchemaMake(),
} }
}
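// Note on the d.SetId("") added to the error path above: clearing the ID is a defensive way to make
// sure no stale identifier lingers if the lookup fails. The same read skeleton applies to every data
// source in this package: resolve the object, set a stable ID, then flatten the record into the
// schema fields.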

@ -1,70 +1,71 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListCheckPresence(ctx, d, m) lbList, err := utilityLBListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
id := uuid.New() }
d.SetId(id.String()) id := uuid.New()
d.Set("items", flattenLBList(lbList)) d.SetId(id.String())
d.Set("entry_count", lbList.EntryCount) d.Set("items", flattenLBList(lbList))
d.Set("entry_count", lbList.EntryCount)
return nil
} return nil
}
func DataSourceLBList() *schema.Resource {
return &schema.Resource{ func DataSourceLBList() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceLBListRead,
ReadContext: dataSourceLBListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dsLBListSchemaMake(),
} Schema: dsLBListSchemaMake(),
} }
}
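// For the list data source above, d.Set("items", ...) and d.Set("entry_count", ...) presuppose that
// dsLBListSchemaMake declares matching computed attributes. A minimal sketch under that assumption
// (dsLBItemSchemaMake is a hypothetical name for the nested item schema helper):
//
//	"entry_count": {
//		Type:     schema.TypeInt,
//		Computed: true,
//	},
//	"items": {
//		Type:     schema.TypeList,
//		Computed: true,
//		Elem:     &schema.Resource{Schema: dsLBItemSchemaMake()},
//	},
//
// A random UUID is used as the resource ID because a list result has no single natural identifier.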

@ -1,70 +1,71 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m) lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
id := uuid.New() }
d.SetId(id.String()) id := uuid.New()
d.Set("items", flattenLBList(lbList)) d.SetId(id.String())
d.Set("entry_count", lbList.EntryCount) d.Set("items", flattenLBList(lbList))
d.Set("entry_count", lbList.EntryCount)
return nil
} return nil
}
func DataSourceLBListDeleted() *schema.Resource {
return &schema.Resource{ func DataSourceLBListDeleted() *schema.Resource {
SchemaVersion: 1, return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceLBListDeletedRead,
ReadContext: dataSourceLBListDeletedRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout60s, Read: &constants.Timeout30s,
}, Default: &constants.Timeout60s,
},
Schema: dsLBListDeletedSchemaMake(),
} Schema: dsLBListDeletedSchemaMake(),
} }
}

@ -1,432 +1,467 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
) "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBCreate") func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBCreate")
haveRGID, err := existRGID(ctx, d, m)
if err != nil { haveRGID, err := existRGID(ctx, d, m)
return diag.FromErr(err) if err != nil {
} return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) if !haveRGID {
} return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil { haveExtNetID, err := existExtNetID(ctx, d, m)
return diag.FromErr(err) if err != nil {
} return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) if !haveExtNetID {
} return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
haveVins, err := existViNSID(ctx, d, m)
if err != nil { haveVins, err := existViNSID(ctx, d, m)
return diag.FromErr(err) if err != nil {
} return diag.FromErr(err)
}
if !haveVins {
return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) if !haveVins {
} return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int))
}
c := m.(*controller.ControllerCfg)
req := lb.CreateRequest{ c := m.(*controller.ControllerCfg)
Name: d.Get("name").(string), req := lb.CreateRequest{
RGID: uint64(d.Get("rg_id").(int)), Name: d.Get("name").(string),
ExtNetID: uint64(d.Get("extnet_id").(int)), RGID: uint64(d.Get("rg_id").(int)),
VINSID: uint64(d.Get("vins_id").(int)), ExtNetID: uint64(d.Get("extnet_id").(int)),
Start: d.Get("start").(bool), VINSID: uint64(d.Get("vins_id").(int)),
} Start: d.Get("start").(bool),
}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string) if desc, ok := d.GetOk("desc"); ok {
} req.Description = desc.(string)
}
if haMode, ok := d.GetOk("ha_mode"); ok {
req.HighlyAvailable = haMode.(bool) if haMode, ok := d.GetOk("ha_mode"); ok {
} req.HighlyAvailable = haMode.(bool)
}
lbId, err := c.CloudAPI().LB().Create(ctx, req)
if err != nil { lbId, err := c.CloudAPI().LB().Create(ctx, req)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
lbIdParsed := strconv.Itoa(int(lbId)) }
d.SetId(lbIdParsed) d.SetId(strconv.FormatUint(lbId, 10))
d.Set("lb_id", lbId) d.Set("lb_id", lbId)
_, err = utilityLBCheckPresence(ctx, d, m) w := dc.Warnings{}
if err != nil {
return diag.FromErr(err) if enable, ok := d.GetOk("enable"); ok {
} req := lb.DisableEnableRequest{
LBID: lbId,
if enable, ok := d.GetOk("enable"); ok { }
lbId := uint64(d.Get("lb_id").(int))
req := lb.DisableEnableRequest{ if enable.(bool) {
LBID: lbId, _, err := c.CloudAPI().LB().Enable(ctx, req)
} if err != nil {
w.Add(err)
if enable.(bool) { }
_, err := c.CloudAPI().LB().Enable(ctx, req) } else {
if err != nil { _, err := c.CloudAPI().LB().Disable(ctx, req)
return diag.FromErr(err) if err != nil {
} w.Add(err)
} else { }
_, err := c.CloudAPI().LB().Disable(ctx, req) }
if err != nil {
return diag.FromErr(err) if start, ok := d.GetOk("start"); ok && enable.(bool) {
} if start.(bool) {
} req := lb.StartRequest{LBID: lbId}
} _, err := c.CloudAPI().LB().Start(ctx, req)
if err != nil {
return resourceLBRead(ctx, d, m) w.Add(err)
} }
} else {
func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { req := lb.StopRequest{LBID: lbId}
log.Debugf("resourceLBRead") _, err := c.CloudAPI().LB().Stop(ctx, req)
if err != nil {
// c := m.(*controller.ControllerCfg) w.Add(err)
}
lbRec, err := utilityLBCheckPresence(ctx, d, m) }
if lbRec == nil { }
d.SetId("") }
return diag.FromErr(err)
} return append(w.Get(), resourceLBRead(ctx, d, m)...)
}
hasChanged := false
func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
switch lbRec.Status { log.Debugf("resourceLBRead")
case status.Modeled:
return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status) // c := m.(*controller.ControllerCfg)
case status.Creating:
case status.Created: lbRec, err := utilityLBCheckPresence(ctx, d, m)
case status.Deleting: if lbRec == nil {
case status.Deleted: d.SetId("")
// lbId, _ := strconv.ParseUint(d.Id(), 10, 64) return diag.FromErr(err)
// restoreReq := lb.RestoreRequest{LBID: lbId} }
// enableReq := lb.DisableEnableRequest{LBID: lbId}
hasChanged := false
// _, err := c.CloudAPI().LB().Restore(ctx, restoreReq)
// if err != nil { switch lbRec.Status {
// return diag.FromErr(err) case status.Modeled:
// } return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status)
// _, err = c.CloudAPI().LB().Enable(ctx, enableReq) case status.Creating:
// if err != nil { case status.Created:
// return diag.FromErr(err) case status.Deleting:
// } case status.Deleted:
// lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
// hasChanged = true // restoreReq := lb.RestoreRequest{LBID: lbId}
case status.Destroying: // enableReq := lb.DisableEnableRequest{LBID: lbId}
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
case status.Destroyed: // _, err := c.CloudAPI().LB().Restore(ctx, restoreReq)
d.SetId("") // if err != nil {
return diag.Errorf("The resource cannot be updated because it has been destroyed") // return diag.FromErr(err)
// return resourceLBCreate(ctx, d, m) // }
case status.Enabled: // _, err = c.CloudAPI().LB().Enable(ctx, enableReq)
case status.Enabling: // if err != nil {
case status.Disabling: // return diag.FromErr(err)
case status.Disabled: // }
log.Debugf("The LB is in status: %s, troubles may occur with update. Please, enable LB first.", lbRec.Status)
case status.Restoring: // hasChanged = true
} case status.Destroying:
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
if hasChanged { case status.Destroyed:
lbRec, err = utilityLBCheckPresence(ctx, d, m) d.SetId("")
if err != nil { return diag.Errorf("The resource cannot be updated because it has been destroyed")
d.SetId("") // return resourceLBCreate(ctx, d, m)
return diag.FromErr(err) case status.Enabled:
} case status.Enabling:
} case status.Disabling:
case status.Disabled:
flattenResourceLB(d, lbRec) log.Debugf("The LB is in status: %s, troubles may occur with update. Please, enable LB first.", lbRec.Status)
case status.Restoring:
return nil }
}
if hasChanged {
func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { lbRec, err = utilityLBCheckPresence(ctx, d, m)
log.Debugf("resourceLBDelete") if err != nil {
d.SetId("")
_, err := utilityLBCheckPresence(ctx, d, m) return diag.FromErr(err)
if err != nil { }
return diag.FromErr(err) }
}
flattenResourceLB(d, lbRec)
c := m.(*controller.ControllerCfg)
req := lb.DeleteRequest{ return nil
LBID: uint64(d.Get("lb_id").(int)), }
}
func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if permanently, ok := d.GetOk("permanently"); ok { log.Debugf("resourceLBDelete")
req.Permanently = permanently.(bool)
} c := m.(*controller.ControllerCfg)
lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
_, err = c.CloudAPI().LB().Delete(ctx, req) req := lb.DeleteRequest{
if err != nil { LBID: lbId,
return diag.FromErr(err) }
}
if permanently, ok := d.GetOk("permanently"); ok {
d.SetId("") req.Permanently = permanently.(bool)
}
return nil
} _, err := c.CloudAPI().LB().Delete(ctx, req)
if err != nil {
func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { return diag.FromErr(err)
log.Debugf("resourceLBUpdate") }
c := m.(*controller.ControllerCfg)
d.SetId("")
haveRGID, err := existRGID(ctx, d, m)
if err != nil { return nil
return diag.FromErr(err) }
}
func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if !haveRGID { log.Debugf("resourceLBUpdate")
return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) c := m.(*controller.ControllerCfg)
}
haveRGID, err := existRGID(ctx, d, m)
haveExtNetID, err := existExtNetID(ctx, d, m) if err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
}
if !haveRGID {
if !haveExtNetID { return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) }
}
haveExtNetID, err := existExtNetID(ctx, d, m)
haveVins, err := existViNSID(ctx, d, m) if err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
}
if !haveExtNetID {
if !haveVins { return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) }
}
haveVins, err := existViNSID(ctx, d, m)
lbRec, err := utilityLBCheckPresence(ctx, d, m) if err != nil {
if lbRec == nil { return diag.FromErr(err)
d.SetId("") }
return diag.FromErr(err)
} if !haveVins {
return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int))
hasChanged := false }
switch lbRec.Status { lbRec, err := utilityLBCheckPresence(ctx, d, m)
case status.Modeled: if lbRec == nil {
return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status) d.SetId("")
case status.Creating: return diag.FromErr(err)
case status.Created: }
case status.Deleting:
case status.Deleted: hasChanged := false
lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := lb.RestoreRequest{LBID: lbId} switch lbRec.Status {
enableReq := lb.DisableEnableRequest{LBID: lbId} case status.Modeled:
return diag.Errorf("The LB is in status: %s, please, contact support for more information", lbRec.Status)
_, err := c.CloudAPI().LB().Restore(ctx, restoreReq) case status.Creating:
if err != nil { case status.Created:
return diag.FromErr(err) case status.Deleting:
} case status.Deleted:
_, err = c.CloudAPI().LB().Enable(ctx, enableReq) if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
if err != nil { restoreReq := lb.RestoreRequest{LBID: lbRec.ID}
return diag.FromErr(err)
} _, err := c.CloudAPI().LB().Restore(ctx, restoreReq)
if err != nil {
hasChanged = true return diag.FromErr(err)
case status.Destroying: }
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) }
case status.Destroyed: if enable, ok := d.GetOk("enable"); ok {
d.SetId("") req := lb.DisableEnableRequest{
return diag.Errorf("The resource cannot be updated because it has been destroyed") LBID: lbRec.ID,
// return resourceLBCreate(ctx, d, m) }
case status.Enabled:
case status.Enabling: if enable.(bool) {
case status.Disabling: _, err := c.CloudAPI().LB().Enable(ctx, req)
case status.Disabled: if err != nil {
log.Debugf("The LB is in status: %s, troubles may occur with update. Please, enable LB first.", lbRec.Status) return diag.FromErr(err)
case status.Restoring: }
} } else {
_, err := c.CloudAPI().LB().Disable(ctx, req)
if hasChanged { if err != nil {
_, err = utilityLBCheckPresence(ctx, d, m) return diag.FromErr(err)
if err != nil { }
d.SetId("") }
return diag.FromErr(err)
} if start, ok := d.GetOk("start"); ok && enable.(bool) {
} if start.(bool) {
req := lb.StartRequest{LBID: lbRec.ID}
if d.HasChange("ha_mode") { _, err := c.CloudAPI().LB().Start(ctx, req)
hamode := d.Get("ha_mode").(bool) if err != nil {
if hamode { return diag.FromErr(err)
req:= lb.HighlyAvailableRequest { }
LBID: uint64(d.Get("lb_id").(int)), } else {
} req := lb.StopRequest{LBID: lbRec.ID}
_, err := c.CloudAPI().LB().HighlyAvailable(ctx, req) _, err := c.CloudAPI().LB().Stop(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
} }
}
if d.HasChange("enable") { hasChanged = true
enable := d.Get("enable").(bool) case status.Destroying:
req := lb.DisableEnableRequest{ return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
LBID: uint64(d.Get("lb_id").(int)), case status.Destroyed:
} d.SetId("")
if enable { return diag.Errorf("The resource cannot be updated because it has been destroyed")
_, err := c.CloudAPI().LB().Enable(ctx, req) // return resourceLBCreate(ctx, d, m)
if err != nil { case status.Enabled:
return diag.FromErr(err) case status.Enabling:
} case status.Disabling:
} else { case status.Disabled:
_, err := c.CloudAPI().LB().Disable(ctx, req) log.Debugf("The LB is in status: %s, troubles may occur with update. Please, enable LB first.", lbRec.Status)
if err != nil { case status.Restoring:
return diag.FromErr(err) }
}
if hasChanged {
} _, err = utilityLBCheckPresence(ctx, d, m)
} if err != nil {
d.SetId("")
if d.HasChange("start") { return diag.FromErr(err)
start := d.Get("start").(bool) }
lbId := uint64(d.Get("lb_id").(int)) }
if start {
req := lb.StartRequest{LBID: lbId} if d.HasChange("ha_mode") {
_, err := c.CloudAPI().LB().Start(ctx, req) hamode := d.Get("ha_mode").(bool)
if err != nil { if hamode {
return diag.FromErr(err) req := lb.HighlyAvailableRequest{
} LBID: uint64(d.Get("lb_id").(int)),
} else { }
req := lb.StopRequest{LBID: lbId} _, err := c.CloudAPI().LB().HighlyAvailable(ctx, req)
_, err := c.CloudAPI().LB().Stop(ctx, req) if err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
} }
} }
}
if d.HasChange("enable") {
if d.HasChange("desc") { enable := d.Get("enable").(bool)
req := lb.UpdateRequest{ req := lb.DisableEnableRequest{
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
Description: d.Get("desc").(string), }
} if enable {
_, err := c.CloudAPI().LB().Enable(ctx, req)
_, err := c.CloudAPI().LB().Update(ctx, req) if err != nil {
if err != nil { return diag.FromErr(err)
return diag.FromErr(err) }
} } else {
} _, err := c.CloudAPI().LB().Disable(ctx, req)
if err != nil {
if d.HasChange("restart") { return diag.FromErr(err)
restart := d.Get("restart").(bool) }
if restart { }
req := lb.RestartRequest{ }
LBID: uint64(d.Get("lb_id").(int)),
} if d.HasChange("start") {
start := d.Get("start").(bool)
_, err := c.CloudAPI().LB().Restart(ctx, req) lbId := uint64(d.Get("lb_id").(int))
if err != nil { if start {
return diag.FromErr(err) req := lb.StartRequest{LBID: lbId}
} _, err := c.CloudAPI().LB().Start(ctx, req)
} if err != nil {
} return diag.FromErr(err)
}
if d.HasChange("restore") { } else {
restore := d.Get("restore").(bool) req := lb.StopRequest{LBID: lbId}
if restore { _, err := c.CloudAPI().LB().Stop(ctx, req)
req := lb.RestoreRequest{ if err != nil {
LBID: uint64(d.Get("lb_id").(int)), return diag.FromErr(err)
} }
}
_, err := c.CloudAPI().LB().Restore(ctx, req) }
if err != nil {
return diag.FromErr(err) if d.HasChange("desc") {
} req := lb.UpdateRequest{
} LBID: uint64(d.Get("lb_id").(int)),
} Description: d.Get("desc").(string),
}
if d.HasChange("config_reset") {
cfgReset := d.Get("config_reset").(bool) _, err := c.CloudAPI().LB().Update(ctx, req)
if cfgReset { if err != nil {
req := lb.ConfigResetRequest{ return diag.FromErr(err)
LBID: uint64(d.Get("lb_id").(int)), }
} }
_, err := c.CloudAPI().LB().ConfigReset(ctx, req) if d.HasChange("restart") {
if err != nil { restart := d.Get("restart").(bool)
return diag.FromErr(err) if restart {
} req := lb.RestartRequest{
} LBID: uint64(d.Get("lb_id").(int)),
} }
return resourceLBRead(ctx, d, m) _, err := c.CloudAPI().LB().Restart(ctx, req)
} if err != nil {
return diag.FromErr(err)
func ResourceLB() *schema.Resource { }
return &schema.Resource{ }
SchemaVersion: 1, }
CreateContext: resourceLBCreate, if d.HasChange("restore") {
ReadContext: resourceLBRead, restore := d.Get("restore").(bool)
UpdateContext: resourceLBUpdate, if restore {
DeleteContext: resourceLBDelete, req := lb.RestoreRequest{
LBID: uint64(d.Get("lb_id").(int)),
Importer: &schema.ResourceImporter{ }
StateContext: schema.ImportStatePassthroughContext,
}, _, err := c.CloudAPI().LB().Restore(ctx, req)
if err != nil {
Timeouts: &schema.ResourceTimeout{ return diag.FromErr(err)
Create: &constants.Timeout600s, }
Read: &constants.Timeout300s, }
Update: &constants.Timeout300s, }
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s, if d.HasChange("config_reset") {
}, cfgReset := d.Get("config_reset").(bool)
if cfgReset {
Schema: lbResourceSchemaMake(), req := lb.ConfigResetRequest{
} LBID: uint64(d.Get("lb_id").(int)),
} }
_, err := c.CloudAPI().LB().ConfigReset(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
return resourceLBRead(ctx, d, m)
}
func ResourceLB() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceLBCreate,
ReadContext: resourceLBRead,
UpdateContext: resourceLBUpdate,
DeleteContext: resourceLBDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: lbResourceSchemaMake(),
}
}
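// The timeout values wired in above are pointers into the provider's internal/constants package; a
// minimal sketch of how such constants are presumably declared (the exact file may differ):
//
//	package constants
//
//	import "time"
//
//	var (
//		Timeout30s  = time.Second * 30
//		Timeout60s  = time.Second * 60
//		Timeout300s = time.Second * 300
//		Timeout600s = time.Second * 600
//	)
//
// schema.ResourceTimeout fields are *time.Duration, which is why the resource takes their addresses
// (&constants.Timeout600s) rather than passing literal durations.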

@ -1,374 +1,379 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"strings" "strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendCreate") log.Debugf("resourceLBBackendCreate")
haveLBID, err := existLBID(ctx, d, m) haveLBID, err := existLBID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveLBID { if !haveLBID {
return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := lb.BackendCreateRequest{} req := lb.BackendCreateRequest{}
req.BackendName = d.Get("name").(string) req.BackendName = d.Get("name").(string)
req.LBID = uint64(d.Get("lb_id").(int)) req.LBID = uint64(d.Get("lb_id").(int))
if algorithm, ok := d.GetOk("algorithm"); ok { if algorithm, ok := d.GetOk("algorithm"); ok {
req.Algorithm = algorithm.(string) req.Algorithm = algorithm.(string)
} }
if inter, ok := d.GetOk("inter"); ok { if inter, ok := d.GetOk("inter"); ok {
req.Inter = uint64(inter.(int)) req.Inter = uint64(inter.(int))
} }
if downinter, ok := d.GetOk("downinter"); ok { if downinter, ok := d.GetOk("downinter"); ok {
req.DownInter = uint64(downinter.(int)) req.DownInter = uint64(downinter.(int))
} }
if rise, ok := d.GetOk("rise"); ok { if rise, ok := d.GetOk("rise"); ok {
req.Rise = uint64(rise.(int)) req.Rise = uint64(rise.(int))
} }
if fall, ok := d.GetOk("fall"); ok { if fall, ok := d.GetOk("fall"); ok {
req.Fall = uint64(fall.(int)) req.Fall = uint64(fall.(int))
} }
if slowstart, ok := d.GetOk("slowstart"); ok { if slowstart, ok := d.GetOk("slowstart"); ok {
req.SlowStart = uint64(slowstart.(int)) req.SlowStart = uint64(slowstart.(int))
} }
if maxconn, ok := d.GetOk("maxconn"); ok { if maxconn, ok := d.GetOk("maxconn"); ok {
req.MaxConn = uint64(maxconn.(int)) req.MaxConn = uint64(maxconn.(int))
} }
if maxqueue, ok := d.GetOk("maxqueue"); ok { if maxqueue, ok := d.GetOk("maxqueue"); ok {
req.MaxQueue = uint64(maxqueue.(int)) req.MaxQueue = uint64(maxqueue.(int))
} }
if weight, ok := d.GetOk("weight"); ok { if weight, ok := d.GetOk("weight"); ok {
req.Weight = uint64(weight.(int)) req.Weight = uint64(weight.(int))
} }
_, err = c.CloudAPI().LB().BackendCreate(ctx, req) _, err = c.CloudAPI().LB().BackendCreate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBBackendCheckPresence(ctx, d, m)
if err != nil { _, err = utilityLBBackendCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
return resourceLBBackendRead(ctx, d, m) }
}
return resourceLBBackendRead(ctx, d, m)
func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBBackendRead")
func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
b, err := utilityLBBackendCheckPresence(ctx, d, m) log.Debugf("resourceLBBackendRead")
if b == nil {
d.SetId("") b, err := utilityLBBackendCheckPresence(ctx, d, m)
return diag.FromErr(err) if b == nil {
} d.SetId("")
return diag.FromErr(err)
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) }
flattenResourceLBBackend(d, b, lbId) lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32)
return nil flattenResourceLBBackend(d, b, lbId)
}
return nil
func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBBackendDelete")
func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
_, err := utilityLBBackendCheckPresence(ctx, d, m) log.Debugf("resourceLBBackendDelete")
if err != nil {
return diag.FromErr(err) _, err := utilityLBBackendCheckPresence(ctx, d, m)
} if err != nil {
d.SetId("")
c := m.(*controller.ControllerCfg) return diag.FromErr(err)
req := lb.BackendDeleteRequest{ }
LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("name").(string), c := m.(*controller.ControllerCfg)
} req := lb.BackendDeleteRequest{
LBID: uint64(d.Get("lb_id").(int)),
_, err = c.CloudAPI().LB().BackendDelete(ctx, req) BackendName: d.Get("name").(string),
if err != nil { }
return diag.FromErr(err)
} _, err = c.CloudAPI().LB().BackendDelete(ctx, req)
if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err)
return nil }
}
d.SetId("")
func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendEdit") return nil
c := m.(*controller.ControllerCfg) }
haveLBID, err := existLBID(ctx, d, m) func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if err != nil { log.Debugf("resourceLBBackendEdit")
return diag.FromErr(err) c := m.(*controller.ControllerCfg)
}
haveLBID, err := existLBID(ctx, d, m)
if !haveLBID { if err != nil {
return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.FromErr(err)
} }
req := lb.BackendUpdateRequest{ if !haveLBID {
LBID: uint64(d.Get("lb_id").(int)), return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
BackendName: d.Get("name").(string), }
}
req := lb.BackendUpdateRequest{
if d.HasChange("algorithm") { LBID: uint64(d.Get("lb_id").(int)),
req.Algorithm = d.Get("algorithm").(string) BackendName: d.Get("name").(string),
} }
if d.HasChange("inter") {
req.Inter = uint64(d.Get("inter").(int)) if d.HasChange("algorithm") {
} req.Algorithm = d.Get("algorithm").(string)
if d.HasChange("downinter") { }
req.DownInter = uint64(d.Get("downinter").(int)) if d.HasChange("inter") {
} req.Inter = uint64(d.Get("inter").(int))
if d.HasChange("rise") { }
req.Rise = uint64(d.Get("rise").(int)) if d.HasChange("downinter") {
} req.DownInter = uint64(d.Get("downinter").(int))
if d.HasChange("fall") { }
req.Fall = uint64(d.Get("fall").(int)) if d.HasChange("rise") {
} req.Rise = uint64(d.Get("rise").(int))
if d.HasChange("slowstart") { }
req.SlowStart = uint64(d.Get("slowstart").(int)) if d.HasChange("fall") {
} req.Fall = uint64(d.Get("fall").(int))
if d.HasChange("maxconn") { }
req.MaxConn = uint64(d.Get("maxconn").(int)) if d.HasChange("slowstart") {
} req.SlowStart = uint64(d.Get("slowstart").(int))
if d.HasChange("maxqueue") { }
req.MaxQueue = uint64(d.Get("maxqueue").(int)) if d.HasChange("maxconn") {
} req.MaxConn = uint64(d.Get("maxconn").(int))
if d.HasChange("weight") { }
req.Weight = uint64(d.Get("weight").(int)) if d.HasChange("maxqueue") {
} req.MaxQueue = uint64(d.Get("maxqueue").(int))
}
_, err = c.CloudAPI().LB().BackendUpdate(ctx, req) if d.HasChange("weight") {
if err != nil { req.Weight = uint64(d.Get("weight").(int))
return diag.FromErr(err) }
}
_, err = c.CloudAPI().LB().BackendUpdate(ctx, req)
return resourceLBBackendRead(ctx, d, m) if err != nil {
} d.SetId("")
return diag.FromErr(err)
func ResourceLBBackend() *schema.Resource { }
return &schema.Resource{
SchemaVersion: 1, return resourceLBBackendRead(ctx, d, m)
}
CreateContext: resourceLBBackendCreate,
ReadContext: resourceLBBackendRead, func ResourceLBBackend() *schema.Resource {
UpdateContext: resourceLBBackendUpdate, return &schema.Resource{
DeleteContext: resourceLBBackendDelete, SchemaVersion: 1,
Importer: &schema.ResourceImporter{ CreateContext: resourceLBBackendCreate,
StateContext: schema.ImportStatePassthroughContext, ReadContext: resourceLBBackendRead,
}, UpdateContext: resourceLBBackendUpdate,
DeleteContext: resourceLBBackendDelete,
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s, Importer: &schema.ResourceImporter{
Read: &constants.Timeout300s, StateContext: schema.ImportStatePassthroughContext,
Update: &constants.Timeout300s, },
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s, Timeouts: &schema.ResourceTimeout{
}, Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Schema: map[string]*schema.Schema{ Update: &constants.Timeout300s,
"lb_id": { Delete: &constants.Timeout300s,
Type: schema.TypeInt, Default: &constants.Timeout300s,
Required: true, },
Description: "ID of the LB instance to backendCreate",
}, Schema: map[string]*schema.Schema{
"name": { "lb_id": {
Type: schema.TypeString, Type: schema.TypeInt,
Required: true, Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create", Description: "ID of the LB instance to backendCreate",
}, },
"algorithm": { "name": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Required: true,
Computed: true, Description: "Must be unique among all backends of this LB - name of the new backend to create",
ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false), },
}, "algorithm": {
"guid": { Type: schema.TypeString,
Type: schema.TypeString, Optional: true,
Computed: true, Computed: true,
}, ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false),
"downinter": { },
Type: schema.TypeInt, "guid": {
Optional: true, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"fall": { "downinter": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"inter": { "fall": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"maxconn": { "inter": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"maxqueue": { "maxconn": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"rise": { "maxqueue": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"slowstart": { "rise": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"weight": { "slowstart": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"servers": { "weight": {
Type: schema.TypeList, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Elem: &schema.Resource{ },
Schema: map[string]*schema.Schema{ "servers": {
"address": { Type: schema.TypeList,
Type: schema.TypeString, Optional: true,
Optional: true, Computed: true,
Computed: true, Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"check": { "address": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"guid": { "check": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Optional: true,
}, Computed: true,
"name": { },
Type: schema.TypeString, "guid": {
Optional: true, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"port": { "name": {
Type: schema.TypeInt, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"server_settings": { "port": {
Type: schema.TypeList, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Elem: &schema.Resource{ },
Schema: map[string]*schema.Schema{ "server_settings": {
"downinter": { Type: schema.TypeList,
Type: schema.TypeInt, Optional: true,
Optional: true, Computed: true,
Computed: true, Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"fall": { "downinter": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"guid": { "fall": {
Type: schema.TypeString, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"inter": { },
Type: schema.TypeInt, "guid": {
Optional: true, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"maxconn": { "inter": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"maxqueue": { "maxconn": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"rise": { "maxqueue": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"slowstart": { "rise": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
"weight": { "slowstart": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
}, },
}, "weight": {
}, Type: schema.TypeInt,
}, Optional: true,
}, Computed: true,
}, },
}, },
}, },
} },
} },
},
},
},
}
}
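For reference, the backend resource above keys its Terraform state on a composite ID of the form `<lb_id>#<name>`: Create concatenates the two values with `#`, Read splits them back apart, and, because the importer is `schema.ImportStatePassthroughContext`, the same string is what `terraform import` expects. A minimal, self-contained round-trip sketch (illustrative only, not provider code; the sample values are invented):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Create: the state ID is "<lb_id>#<backend_name>".
	lbID := 1234
	name := "web-backend"
	id := strconv.Itoa(lbID) + "#" + name // "1234#web-backend"

	// Read (and import): split the ID back into its two parts.
	parts := strings.Split(id, "#")
	parsedLBID, err := strconv.ParseInt(parts[0], 10, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsedLBID, parts[1]) // 1234 web-backend
}
```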
@@ -1,314 +1,320 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"strings" "strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerCreate") log.Debugf("resourceLBBackendServerCreate")
haveLBID, err := existLBID(ctx, d, m) haveLBID, err := existLBID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveLBID { if !haveLBID {
return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := lb.BackendServerAddRequest{ req := lb.BackendServerAddRequest{
BackendName: d.Get("backend_name").(string), BackendName: d.Get("backend_name").(string),
ServerName: d.Get("name").(string), ServerName: d.Get("name").(string),
Address: d.Get("address").(string), Address: d.Get("address").(string),
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
Port: uint64(d.Get("port").(int)), Port: uint64(d.Get("port").(int)),
} }
if check, ok := d.GetOk("check"); ok { if check, ok := d.GetOk("check"); ok {
req.Check = check.(string) req.Check = check.(string)
} }
if inter, ok := d.GetOk("inter"); ok { if inter, ok := d.GetOk("inter"); ok {
req.Inter = uint64(inter.(int)) req.Inter = uint64(inter.(int))
} }
if downinter, ok := d.GetOk("downinter"); ok { if downinter, ok := d.GetOk("downinter"); ok {
req.DownInter = uint64(downinter.(int)) req.DownInter = uint64(downinter.(int))
} }
if rise, ok := d.GetOk("rise"); ok { if rise, ok := d.GetOk("rise"); ok {
req.Rise = uint64(rise.(int)) req.Rise = uint64(rise.(int))
} }
if fall, ok := d.GetOk("fall"); ok { if fall, ok := d.GetOk("fall"); ok {
req.Fall = uint64(fall.(int)) req.Fall = uint64(fall.(int))
} }
if slowstart, ok := d.GetOk("slowstart"); ok { if slowstart, ok := d.GetOk("slowstart"); ok {
req.SlowStart = uint64(slowstart.(int)) req.SlowStart = uint64(slowstart.(int))
} }
if maxconn, ok := d.GetOk("maxconn"); ok { if maxconn, ok := d.GetOk("maxconn"); ok {
req.MaxConn = uint64(maxconn.(int)) req.MaxConn = uint64(maxconn.(int))
} }
if maxqueue, ok := d.GetOk("maxqueue"); ok { if maxqueue, ok := d.GetOk("maxqueue"); ok {
req.MaxQueue = uint64(maxqueue.(int)) req.MaxQueue = uint64(maxqueue.(int))
} }
if weight, ok := d.GetOk("weight"); ok { if weight, ok := d.GetOk("weight"); ok {
req.Weight = uint64(weight.(int)) req.Weight = uint64(weight.(int))
} }
_, err = c.CloudAPI().LB().BackendServerAdd(ctx, req) _, err = c.CloudAPI().LB().BackendServerAdd(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string))
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil { _, err = utilityLBBackendServerCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
return resourceLBBackendServerRead(ctx, d, m) }
}
return resourceLBBackendServerRead(ctx, d, m)
func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBBackendServerRead")
func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
s, err := utilityLBBackendServerCheckPresence(ctx, d, m) log.Debugf("resourceLBBackendServerRead")
if err != nil {
d.SetId("") s, err := utilityLBBackendServerCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) }
backendName := strings.Split(d.Id(), "#")[1]
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32)
flattenResourceLBBackendServer(d, s, lbId, backendName) backendName := strings.Split(d.Id(), "#")[1]
return nil flattenResourceLBBackendServer(d, s, lbId, backendName)
}
return nil
func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBBackendServerDelete")
func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
_, err := utilityLBBackendServerCheckPresence(ctx, d, m) log.Debugf("resourceLBBackendServerDelete")
if err != nil {
return diag.FromErr(err) _, err := utilityLBBackendServerCheckPresence(ctx, d, m)
} if err != nil {
d.SetId("")
c := m.(*controller.ControllerCfg) return diag.FromErr(err)
req := lb.BackendServerDeleteRequest{ }
LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("backend_name").(string), c := m.(*controller.ControllerCfg)
ServerName: d.Get("name").(string), req := lb.BackendServerDeleteRequest{
} LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("backend_name").(string),
_, err = c.CloudAPI().LB().BackendServerDelete(ctx, req) ServerName: d.Get("name").(string),
if err != nil { }
return diag.FromErr(err)
} _, err = c.CloudAPI().LB().BackendServerDelete(ctx, req)
d.SetId("") if err != nil {
d.SetId("")
return nil return diag.FromErr(err)
} }
d.SetId("")
func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerEdit") return nil
c := m.(*controller.ControllerCfg) }
haveLBID, err := existLBID(ctx, d, m) func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if err != nil { log.Debugf("resourceLBBackendServerEdit")
return diag.FromErr(err) c := m.(*controller.ControllerCfg)
}
haveLBID, err := existLBID(ctx, d, m)
if !haveLBID { if err != nil {
return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) d.SetId("")
} return diag.FromErr(err)
}
req := lb.BackendServerUpdateRequest{
BackendName: d.Get("backend_name").(string), if !haveLBID {
LBID: uint64(d.Get("lb_id").(int)), return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
ServerName: d.Get("name").(string), }
Address: d.Get("address").(string),
Port: uint64(d.Get("port").(int)), req := lb.BackendServerUpdateRequest{
} BackendName: d.Get("backend_name").(string),
LBID: uint64(d.Get("lb_id").(int)),
if d.HasChange("check") { ServerName: d.Get("name").(string),
req.Check = d.Get("check").(string) Address: d.Get("address").(string),
} Port: uint64(d.Get("port").(int)),
if d.HasChange("inter") { }
req.Inter = uint64(d.Get("inter").(int))
} if d.HasChange("check") {
if d.HasChange("downinter") { req.Check = d.Get("check").(string)
req.DownInter = uint64(d.Get("downinter").(int)) }
} if d.HasChange("inter") {
if d.HasChange("rise") { req.Inter = uint64(d.Get("inter").(int))
req.Rise = uint64(d.Get("rise").(int)) }
} if d.HasChange("downinter") {
if d.HasChange("fall") { req.DownInter = uint64(d.Get("downinter").(int))
req.Fall = uint64(d.Get("fall").(int)) }
} if d.HasChange("rise") {
if d.HasChange("slowstart") { req.Rise = uint64(d.Get("rise").(int))
req.SlowStart = uint64(d.Get("slowstart").(int)) }
} if d.HasChange("fall") {
if d.HasChange("maxconn") { req.Fall = uint64(d.Get("fall").(int))
req.MaxConn = uint64(d.Get("maxconn").(int)) }
} if d.HasChange("slowstart") {
if d.HasChange("maxqueue") { req.SlowStart = uint64(d.Get("slowstart").(int))
req.MaxQueue = uint64(d.Get("maxqueue").(int)) }
} if d.HasChange("maxconn") {
if d.HasChange("weight") { req.MaxConn = uint64(d.Get("maxconn").(int))
req.Weight = uint64(d.Get("weight").(int)) }
} if d.HasChange("maxqueue") {
req.MaxQueue = uint64(d.Get("maxqueue").(int))
_, err = c.CloudAPI().LB().BackendServerUpdate(ctx, req) }
if err != nil { if d.HasChange("weight") {
return diag.FromErr(err) req.Weight = uint64(d.Get("weight").(int))
} }
//TODO: move servers here	_, err = c.CloudAPI().LB().BackendServerUpdate(ctx, req)
if err != nil {
return resourceLBBackendServerRead(ctx, d, m) d.SetId("")
} return diag.FromErr(err)
}
func ResourceLBBackendServer() *schema.Resource {
return &schema.Resource{	//TODO: move servers here
SchemaVersion: 1,
return resourceLBBackendServerRead(ctx, d, m)
CreateContext: resourceLBBackendServerCreate, }
ReadContext: resourceLBBackendServerRead,
UpdateContext: resourceLBBackendServerUpdate, func ResourceLBBackendServer() *schema.Resource {
DeleteContext: resourceLBBackendServerDelete, return &schema.Resource{
SchemaVersion: 1,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext, CreateContext: resourceLBBackendServerCreate,
}, ReadContext: resourceLBBackendServerRead,
UpdateContext: resourceLBBackendServerUpdate,
Timeouts: &schema.ResourceTimeout{ DeleteContext: resourceLBBackendServerDelete,
Create: &constants.Timeout600s,
Read: &constants.Timeout300s, Importer: &schema.ResourceImporter{
Update: &constants.Timeout300s, StateContext: schema.ImportStatePassthroughContext,
Delete: &constants.Timeout300s, },
Default: &constants.Timeout300s,
}, Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Schema: map[string]*schema.Schema{ Read: &constants.Timeout300s,
"lb_id": { Update: &constants.Timeout300s,
Type: schema.TypeInt, Delete: &constants.Timeout300s,
Required: true, Default: &constants.Timeout300s,
Description: "ID of the LB instance to backendCreate", },
},
"backend_name": { Schema: map[string]*schema.Schema{
Type: schema.TypeString, "lb_id": {
Required: true, Type: schema.TypeInt,
Description: "Must be unique among all backends of this LB - name of the new backend to create", Required: true,
}, Description: "ID of the LB instance to backendCreate",
"name": { },
Type: schema.TypeString, "backend_name": {
Required: true, Type: schema.TypeString,
Description: "Must be unique among all servers defined for this backend - name of the server definition to add.", Required: true,
}, Description: "Must be unique among all backends of this LB - name of the new backend to create",
"address": { },
Type: schema.TypeString, "name": {
Required: true, Type: schema.TypeString,
Description: "IP address of the server.", Required: true,
}, Description: "Must be unique among all servers defined for this backend - name of the server definition to add.",
"port": { },
Type: schema.TypeInt, "address": {
Required: true, Type: schema.TypeString,
Description: "Port number on the server", Required: true,
}, Description: "IP address of the server.",
"check": { },
Type: schema.TypeString, "port": {
Optional: true, Type: schema.TypeInt,
Computed: true, Required: true,
ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false), Description: "Port number on the server",
Description: "set to disabled if this server should be used regardless of its state.", },
}, "check": {
"guid": { Type: schema.TypeString,
Type: schema.TypeString, Optional: true,
Computed: true, Computed: true,
}, ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false),
"downinter": { Description: "set to disabled if this server should be used regardless of its state.",
Type: schema.TypeInt, },
Optional: true, "guid": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
"fall": { },
Type: schema.TypeInt, "downinter": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"inter": { },
Type: schema.TypeInt, "fall": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"maxconn": { },
Type: schema.TypeInt, "inter": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"maxqueue": { },
Type: schema.TypeInt, "maxconn": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"rise": { },
Type: schema.TypeInt, "maxqueue": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"slowstart": { },
Type: schema.TypeInt, "rise": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
"weight": { },
Type: schema.TypeInt, "slowstart": {
Optional: true, Type: schema.TypeInt,
Computed: true, Optional: true,
}, Computed: true,
}, },
} "weight": {
} Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
}
}
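The backend server resource follows the same convention with a third segment, `<lb_id>#<backend_name>#<server_name>`, written at create time and passed straight through on import. Below is a small validation helper of the kind one might run before `terraform import`; it is written here for illustration and is not part of the provider:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// validateServerImportID checks the "<lb_id>#<backend_name>#<server_name>"
// layout that the server resource stores as its state ID.
func validateServerImportID(id string) error {
	parts := strings.Split(id, "#")
	if len(parts) != 3 {
		return fmt.Errorf("expected <lb_id>#<backend_name>#<server_name>, got %q", id)
	}
	if _, err := strconv.ParseUint(parts[0], 10, 64); err != nil {
		return fmt.Errorf("lb_id %q is not a number: %w", parts[0], err)
	}
	if parts[1] == "" || parts[2] == "" {
		return fmt.Errorf("backend and server names must not be empty")
	}
	return nil
}

func main() {
	fmt.Println(validateServerImportID("1234#web-backend#srv-1")) // <nil>
	fmt.Println(validateServerImportID("web-backend#srv-1"))      // error: missing lb_id segment
}
```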
@@ -1,191 +1,195 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"strings" "strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendCreate") log.Debugf("resourceLBFrontendCreate")
haveLBID, err := existLBID(ctx, d, m) haveLBID, err := existLBID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveLBID { if !haveLBID {
return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := lb.FrontendCreateRequest{ req := lb.FrontendCreateRequest{
BackendName: d.Get("backend_name").(string), BackendName: d.Get("backend_name").(string),
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("name").(string), FrontendName: d.Get("name").(string),
} }
_, err = c.CloudAPI().LB().FrontendCreate(ctx, req) _, err = c.CloudAPI().LB().FrontendCreate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil { _, err = utilityLBFrontendCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
return resourceLBFrontendRead(ctx, d, m) }
}
return resourceLBFrontendRead(ctx, d, m)
func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBFrontendRead")
func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
f, err := utilityLBFrontendCheckPresence(ctx, d, m) log.Debugf("resourceLBFrontendRead")
if err != nil {
d.SetId("") f, err := utilityLBFrontendCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) }
flattenLBFrontend(d, f, lbId) lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32)
return nil flattenLBFrontend(d, f, lbId)
}
return nil
func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBFrontendDelete")
func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
_, err := utilityLBFrontendCheckPresence(ctx, d, m) log.Debugf("resourceLBFrontendDelete")
if err != nil {
return diag.FromErr(err) _, err := utilityLBFrontendCheckPresence(ctx, d, m)
} if err != nil {
d.SetId("")
c := m.(*controller.ControllerCfg) return diag.FromErr(err)
req := lb.FrontendDeleteRequest{ }
LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("name").(string), c := m.(*controller.ControllerCfg)
} req := lb.FrontendDeleteRequest{
LBID: uint64(d.Get("lb_id").(int)),
_, err = c.CloudAPI().LB().FrontendDelete(ctx, req) FrontendName: d.Get("name").(string),
if err != nil { }
return diag.FromErr(err)
} _, err = c.CloudAPI().LB().FrontendDelete(ctx, req)
if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err)
return nil }
}
d.SetId("")
func resourceLBFrontendEdit(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
return nil return nil
} }
func ResourceLBFrontend() *schema.Resource { func resourceLBFrontendEdit(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
return &schema.Resource{ return nil
SchemaVersion: 1, }
CreateContext: resourceLBFrontendCreate, func ResourceLBFrontend() *schema.Resource {
ReadContext: resourceLBFrontendRead, return &schema.Resource{
UpdateContext: resourceLBFrontendEdit, SchemaVersion: 1,
DeleteContext: resourceLBFrontendDelete,
CreateContext: resourceLBFrontendCreate,
Importer: &schema.ResourceImporter{ ReadContext: resourceLBFrontendRead,
StateContext: schema.ImportStatePassthroughContext, UpdateContext: resourceLBFrontendEdit,
}, DeleteContext: resourceLBFrontendDelete,
Timeouts: &schema.ResourceTimeout{ Importer: &schema.ResourceImporter{
Create: &constants.Timeout600s, StateContext: schema.ImportStatePassthroughContext,
Read: &constants.Timeout300s, },
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s, Timeouts: &schema.ResourceTimeout{
Default: &constants.Timeout300s, Create: &constants.Timeout600s,
}, Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Schema: map[string]*schema.Schema{ Delete: &constants.Timeout300s,
"lb_id": { Default: &constants.Timeout300s,
Type: schema.TypeInt, },
Required: true,
Description: "ID of the LB instance to backendCreate", Schema: map[string]*schema.Schema{
}, "lb_id": {
"backend_name": { Type: schema.TypeInt,
Type: schema.TypeString, Required: true,
Required: true, Description: "ID of the LB instance to backendCreate",
}, },
"name": { "backend_name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
}, },
"bindings": { "name": {
Type: schema.TypeList, Type: schema.TypeString,
Computed: true, Required: true,
Elem: &schema.Resource{ },
Schema: map[string]*schema.Schema{ "bindings": {
"address": { Type: schema.TypeList,
Type: schema.TypeString, Computed: true,
Computed: true, Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"guid": { "address": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"name": { "guid": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"port": { "name": {
Type: schema.TypeInt, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
}, "port": {
}, Type: schema.TypeInt,
}, Computed: true,
"guid": { },
Type: schema.TypeString, },
Computed: true, },
}, },
}, "guid": {
} Type: schema.TypeString,
} Computed: true,
},
},
}
}
@@ -1,211 +1,216 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package lb package lb
import ( import (
"context" "context"
"strconv" "strconv"
"strings" "strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindCreate") log.Debugf("resourceLBFrontendBindCreate")
haveLBID, err := existLBID(ctx, d, m) haveLBID, err := existLBID(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
if !haveLBID { if !haveLBID {
return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := lb.FrontendBindRequest{ req := lb.FrontendBindRequest{
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("frontend_name").(string), FrontendName: d.Get("frontend_name").(string),
BindingName: d.Get("name").(string), BindingName: d.Get("name").(string),
BindingAddress: d.Get("address").(string), BindingAddress: d.Get("address").(string),
BindingPort: uint64(d.Get("port").(int)), BindingPort: uint64(d.Get("port").(int)),
} }
_, err = c.CloudAPI().LB().FrontendBind(ctx, req) _, err = c.CloudAPI().LB().FrontendBind(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) d.SetId("")
} return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string))
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil { _, err = utilityLBFrontendBindCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
return resourceLBFrontendBindRead(ctx, d, m) }
}
return resourceLBFrontendBindRead(ctx, d, m)
func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBFrontendBindRead")
func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
b, err := utilityLBFrontendBindCheckPresence(ctx, d, m) log.Debugf("resourceLBFrontendBindRead")
if err != nil {
d.SetId("") b, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32) }
frontendName := strings.Split(d.Id(), "#")[1]
lbId, _ := strconv.ParseInt(strings.Split(d.Id(), "#")[0], 10, 32)
flattenLBFrontendBind(d, b, lbId, frontendName) frontendName := strings.Split(d.Id(), "#")[1]
return nil flattenLBFrontendBind(d, b, lbId, frontendName)
}
return nil
func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { }
log.Debugf("resourceLBFrontendBindDelete")
func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
_, err := utilityLBFrontendBindCheckPresence(ctx, d, m) log.Debugf("resourceLBFrontendBindDelete")
if err != nil {
return diag.FromErr(err) _, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
} if err != nil {
d.SetId("")
c := m.(*controller.ControllerCfg) return diag.FromErr(err)
req := lb.FrontendBindDeleteRequest{ }
LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("frontend_name").(string), c := m.(*controller.ControllerCfg)
BindingName: d.Get("name").(string), req := lb.FrontendBindDeleteRequest{
} LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("frontend_name").(string),
_, err = c.CloudAPI().LB().FrontendBindDelete(ctx, req) BindingName: d.Get("name").(string),
if err != nil { }
return diag.FromErr(err)
} _, err = c.CloudAPI().LB().FrontendBindDelete(ctx, req)
if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err)
return nil }
}
d.SetId("")
func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindEdit") return nil
c := m.(*controller.ControllerCfg) }
haveLBID, err := existLBID(ctx, d, m) func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if err != nil { log.Debugf("resourceLBFrontendBindEdit")
return diag.FromErr(err) c := m.(*controller.ControllerCfg)
}
haveLBID, err := existLBID(ctx, d, m)
if !haveLBID { if err != nil {
return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diag.FromErr(err)
} }
req := lb.FrontendBindUpdateRequest{ if !haveLBID {
FrontendName: d.Get("frontend_name").(string), return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
BindingName: d.Get("name").(string), }
LBID: uint64(d.Get("lb_id").(int)),
} req := lb.FrontendBindUpdateRequest{
FrontendName: d.Get("frontend_name").(string),
if d.HasChange("address") { BindingName: d.Get("name").(string),
req.BindingAddress = d.Get("address").(string) LBID: uint64(d.Get("lb_id").(int)),
} }
if d.HasChange("port") { if d.HasChange("address") {
req.BindingPort = uint64(d.Get("port").(int)) req.BindingAddress = d.Get("address").(string)
} }
_, err = c.CloudAPI().LB().FrontendBindUpdate(ctx, req) if d.HasChange("port") {
if err != nil { req.BindingPort = uint64(d.Get("port").(int))
return diag.FromErr(err) }
}
_, err = c.CloudAPI().LB().FrontendBindUpdate(ctx, req)
return resourceLBFrontendBindRead(ctx, d, m) if err != nil {
} d.SetId("")
return diag.FromErr(err)
func ResourceLBFrontendBind() *schema.Resource { }
return &schema.Resource{
SchemaVersion: 1, return resourceLBFrontendBindRead(ctx, d, m)
}
CreateContext: resourceLBFrontendBindCreate,
ReadContext: resourceLBFrontendBindRead, func ResourceLBFrontendBind() *schema.Resource {
UpdateContext: resourceLBFrontendBindUpdate, return &schema.Resource{
DeleteContext: resourceLBFrontendBindDelete, SchemaVersion: 1,
Importer: &schema.ResourceImporter{ CreateContext: resourceLBFrontendBindCreate,
StateContext: schema.ImportStatePassthroughContext, ReadContext: resourceLBFrontendBindRead,
}, UpdateContext: resourceLBFrontendBindUpdate,
DeleteContext: resourceLBFrontendBindDelete,
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s, Importer: &schema.ResourceImporter{
Read: &constants.Timeout300s, StateContext: schema.ImportStatePassthroughContext,
Update: &constants.Timeout300s, },
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s, Timeouts: &schema.ResourceTimeout{
}, Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Schema: map[string]*schema.Schema{ Update: &constants.Timeout300s,
"lb_id": { Delete: &constants.Timeout300s,
Type: schema.TypeInt, Default: &constants.Timeout300s,
Required: true, },
Description: "ID of the LB instance to backendCreate",
}, Schema: map[string]*schema.Schema{
"frontend_name": { "lb_id": {
Type: schema.TypeString, Type: schema.TypeInt,
Required: true, Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create", Description: "ID of the LB instance to backendCreate",
}, },
"address": { "frontend_name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
}, Description: "Must be unique among all backends of this LB - name of the new backend to create",
"guid": { },
Type: schema.TypeString, "address": {
Computed: true, Type: schema.TypeString,
}, Required: true,
"name": { },
Type: schema.TypeString, "guid": {
Required: true, Type: schema.TypeString,
}, Computed: true,
"port": { },
Type: schema.TypeInt, "name": {
Required: true, Type: schema.TypeString,
}, Required: true,
}, },
} "port": {
} Type: schema.TypeInt,
Required: true,
},
},
}
}
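The frontend bind update above copies `address` and `port` into the request only when the plan actually changes them, so unchanged attributes keep their server-side values. A minimal, self-contained sketch of that HasChange-gated pattern (the map-based stand-in replaces `schema.ResourceData` purely for illustration):

```go
package main

import "fmt"

type bindUpdate struct {
	Address string
	Port    uint64
}

// hasChange stands in for schema.ResourceData.HasChange in this sketch.
func hasChange(oldVals, newVals map[string]interface{}, key string) bool {
	return oldVals[key] != newVals[key]
}

func main() {
	oldVals := map[string]interface{}{"address": "10.0.0.5", "port": 8080}
	newVals := map[string]interface{}{"address": "10.0.0.5", "port": 9090}

	req := bindUpdate{}
	if hasChange(oldVals, newVals, "address") {
		req.Address = newVals["address"].(string)
	}
	if hasChange(oldVals, newVals, "port") {
		req.Port = uint64(newVals["port"].(int))
	}
	// Only the changed attribute is populated; address is left empty.
	fmt.Printf("%+v\n", req) // {Address: Port:9090}
}
```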
@@ -1,181 +1,190 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru> Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
/* /*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp. Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it Please see README.md to learn where to place source code so that it
builds seamlessly. builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/ */
package locations package locations
import ( import (
"context" "context"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/locations" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/locations"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func flattenLocationsList(ll *locations.ListLocations) []map[string]interface{} { func flattenLocationsList(ll *locations.ListLocations) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(ll.Data))
for _, l := range ll.Data { for _, l := range ll.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"ckey": l.CKey, "ckey": l.CKey,
"meta": flattens.FlattenMeta(l.Meta), "meta": flattens.FlattenMeta(l.Meta),
"flag": l.Flag, "auth_broker": l.AuthBroker,
"gid": l.GID, "flag": l.Flag,
"guid": l.GUID, "gid": l.GID,
"id": l.ID, "guid": l.GUID,
"location_code": l.LocationCode, "id": l.ID,
"name": l.Name, "location_code": l.LocationCode,
} "name": l.Name,
res = append(res, temp) }
} res = append(res, temp)
return res }
return res
}
}
func dataSourceLocationsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
locations, err := utilityLocationsListCheckPresence(ctx, d, m) func dataSourceLocationsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
if err != nil { locations, err := utilityLocationsListCheckPresence(ctx, d, m)
return diag.FromErr(err) if err != nil {
} d.SetId("")
return diag.FromErr(err)
id := uuid.New() }
d.SetId(id.String())
id := uuid.New()
d.Set("items", flattenLocationsList(locations)) d.SetId(id.String())
d.Set("entry_count", locations.EntryCount)
d.Set("items", flattenLocationsList(locations))
return nil d.Set("entry_count", locations.EntryCount)
}
return nil
func dataSourceLocationsListSchemaMake() map[string]*schema.Schema { }
return map[string]*schema.Schema{
"flag": { func dataSourceLocationsListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString, return map[string]*schema.Schema{
Optional: true, "flag": {
Description: "Filter by flag", Type: schema.TypeString,
}, Optional: true,
"name": { Description: "Filter by flag",
Type: schema.TypeString, },
Optional: true, "name": {
Description: "Filter by name", Type: schema.TypeString,
}, Optional: true,
"by_id": { Description: "Filter by name",
Type: schema.TypeInt, },
Optional: true, "by_id": {
Description: "Filter by ID", Type: schema.TypeInt,
}, Optional: true,
"location_code": { Description: "Filter by ID",
Type: schema.TypeString, },
Optional: true, "location_code": {
Description: "Filter by location code", Type: schema.TypeString,
}, Optional: true,
"page": { Description: "Filter by location code",
Type: schema.TypeInt, },
Optional: true, "page": {
Description: "page number", Type: schema.TypeInt,
}, Optional: true,
"size": { Description: "page number",
Type: schema.TypeInt, },
Optional: true, "size": {
Description: "page size", Type: schema.TypeInt,
}, Optional: true,
"items": { Description: "page size",
Type: schema.TypeList, },
Computed: true, "items": {
Description: "Locations list", Type: schema.TypeList,
Elem: &schema.Resource{ Computed: true,
Schema: map[string]*schema.Schema{ Description: "Locations list",
"ckey": { Elem: &schema.Resource{
Type: schema.TypeString, Schema: map[string]*schema.Schema{
Computed: true, "ckey": {
}, Type: schema.TypeString,
"meta": { Computed: true,
Type: schema.TypeList, },
Computed: true, "meta": {
Elem: &schema.Schema{ Type: schema.TypeList,
Type: schema.TypeString, Computed: true,
}, Elem: &schema.Schema{
}, Type: schema.TypeString,
"flag": { },
Type: schema.TypeString, },
Computed: true, "auth_broker": {
}, Type: schema.TypeList,
"gid": { Computed: true,
Type: schema.TypeInt, Elem: &schema.Schema{
Computed: true, Type: schema.TypeString,
Description: "Grid id", },
}, },
"guid": { "flag": {
Type: schema.TypeInt, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "location id", },
}, "gid": {
"id": { Type: schema.TypeInt,
Type: schema.TypeInt, Computed: true,
Computed: true, Description: "Grid id",
Description: "location id", },
}, "guid": {
"location_code": { Type: schema.TypeInt,
Type: schema.TypeString, Computed: true,
Computed: true, Description: "location id",
Description: "Location code", },
}, "id": {
"name": { Type: schema.TypeInt,
Type: schema.TypeString, Computed: true,
Computed: true, Description: "location id",
Description: "Location name", },
}, "location_code": {
}, Type: schema.TypeString,
}, Computed: true,
}, Description: "Location code",
"entry_count": { },
Type: schema.TypeInt, "name": {
Computed: true, Type: schema.TypeString,
}, Computed: true,
} Description: "Location name",
} },
},
func DataSourceLocationsList() *schema.Resource { },
return &schema.Resource{ },
SchemaVersion: 1, "entry_count": {
Type: schema.TypeInt,
ReadContext: dataSourceLocationsListRead, Computed: true,
},
Timeouts: &schema.ResourceTimeout{ }
Read: &constants.Timeout30s, }
Default: &constants.Timeout60s,
}, func DataSourceLocationsList() *schema.Resource {
return &schema.Resource{
Schema: dataSourceLocationsListSchemaMake(), SchemaVersion: 1,
}
} ReadContext: dataSourceLocationsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceLocationsListSchemaMake(),
}
}
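The locations data source above converts the SDK's list into the `[]map[string]interface{}` shape that a computed `schema.TypeList` expects before calling `d.Set("items", ...)`. A stripped-down sketch of that flatten step; the `Location` struct and the sample values are simplified stand-ins for the SDK record, for illustration only:

```go
package main

import "fmt"

// Location is a simplified stand-in for the SDK's location record.
type Location struct {
	ID           uint64
	GID          uint64
	Name         string
	LocationCode string
	Flag         string
}

// flatten mirrors the pattern of flattenLocationsList: one map per record,
// keyed by the schema field names.
func flatten(items []Location) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(items))
	for _, l := range items {
		res = append(res, map[string]interface{}{
			"id":            l.ID,
			"gid":           l.GID,
			"name":          l.Name,
			"location_code": l.LocationCode,
			"flag":          l.Flag,
		})
	}
	return res
}

func main() {
	fmt.Println(flatten([]Location{{ID: 1, GID: 212, Name: "ru-msk-1", LocationCode: "msk", Flag: ""}}))
}
```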
@@ -1,84 +1,85 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package locations

import (
	"context"
	"strings"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceLocationUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	url, err := utilityLocationUrlCheckPresence(ctx, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())

	url = strings.ReplaceAll(url, "\\", "")
	url = strings.ReplaceAll(url, "\"", "")
	d.Set("url", url)

	return nil
}

func dataSourceLocationUrlSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"url": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Location url",
		},
	}
}

func DataSourceLocationUrl() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceLocationUrlRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceLocationUrlSchemaMake(),
	}
}

@ -42,146 +42,172 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
func sepsSchemaMake() map[string]*schema.Schema { func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
res := map[string]*schema.Schema{ rg, err := utilityResgroupCheckPresence(ctx, d, m)
"sep_id": { if err != nil {
Type: schema.TypeString, d.SetId("") // ensure ID is empty in this case
Computed: true, return diag.FromErr(err)
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
} }
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
return res flattenRg(d, *rg)
return nil
} }
func resourcesSchemaMake() map[string]*schema.Schema { func DataSourceResgroup() *schema.Resource {
res := map[string]*schema.Schema{ return &schema.Resource{
"current": { SchemaVersion: 1,
Type: schema.TypeList,
Computed: true, ReadContext: dataSourceResgroupRead,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Timeouts: &schema.ResourceTimeout{
"cpu": { Read: &constants.Timeout30s,
Type: schema.TypeInt, Default: &constants.Timeout60s,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"map": {
Type: schema.TypeMap,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"map": {
Type: schema.TypeMap,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
},
},
},
}, },
}
return res Schema: dataSourceRgSchemaMake(),
}
} }
// func sepsSchemaMake() map[string]*schema.Schema {
// res := map[string]*schema.Schema{
// "sep_id": {
// Type: schema.TypeString,
// Computed: true,
// },
// "data_name": {
// Type: schema.TypeString,
// Computed: true,
// },
// "disk_size": {
// Type: schema.TypeFloat,
// Computed: true,
// },
// "disk_size_max": {
// Type: schema.TypeInt,
// Computed: true,
// },
// }
// return res
// }
// func resourcesSchemaMake() map[string]*schema.Schema {
// res := map[string]*schema.Schema{
// "current": {
// Type: schema.TypeList,
// Computed: true,
// Elem: &schema.Resource{
// Schema: map[string]*schema.Schema{
// "cpu": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "disk_size": {
// Type: schema.TypeFloat,
// Computed: true,
// },
// "disk_size_max": {
// Type: schema.TypeFloat,
// Computed: true,
// },
// "extips": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "exttraffic": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "gpu": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "ram": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "seps": {
// Type: schema.TypeSet,
// Computed: true,
// Elem: &schema.Resource{
// Schema: map[string]*schema.Schema{
// "sep_id": {
// Type: schema.TypeString,
// Computed: true,
// },
// "map": {
// Type: schema.TypeMap,
// Computed: true,
// Elem: &schema.Schema{
// Type: schema.TypeString,
// },
// },
// },
// },
// },
// },
// },
// },
// "reserved": {
// Type: schema.TypeList,
// Computed: true,
// Elem: &schema.Resource{
// Schema: map[string]*schema.Schema{
// "cpu": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "disk_size": {
// Type: schema.TypeFloat,
// Computed: true,
// },
// "disk_size_max": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "extips": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "exttraffic": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "gpu": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "ram": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "seps": {
// Type: schema.TypeSet,
// Computed: true,
// Elem: &schema.Resource{
// Schema: map[string]*schema.Schema{
// "sep_id": {
// Type: schema.TypeString,
// Computed: true,
// },
// "map": {
// Type: schema.TypeMap,
// Computed: true,
// Elem: &schema.Schema{
// Type: schema.TypeString,
// },
// },
// },
// },
// },
// },
// },
// },
// }
// return res
// }
func aclSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"explicit": {
@ -391,29 +417,3 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
	}
	return res
}
func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
rg, err := utilityResgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
flattenRg(d, *rg)
return nil
}
func DataSourceResgroup() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceResgroupRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceRgSchemaMake(),
}
}

@ -44,6 +44,7 @@ import (
func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rgComputes, err := utilityRgAffinityGroupComputesCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@ -64,7 +65,6 @@ func dataSourceRgAffinityGroupComputesSchemaMake() map[string]*schema.Schema {
			Required:    true,
			Description: "Affinity group label",
		},
		"items": {
			Type:     schema.TypeList,
			Computed: true,

@ -44,6 +44,7 @@ import (
func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	computes, err := utilityRgAffinityGroupsGetCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
	d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
@ -63,7 +64,6 @@ func dataSourceRgAffinityGroupsGetSchemaMake() map[string]*schema.Schema {
			Required:    true,
			Description: "Affinity group label",
		},
		"ids": {
			Type:     schema.TypeList,
			Computed: true,

@ -44,6 +44,7 @@ import (
func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	list, err := utilityRgAffinityGroupsListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@ -44,6 +44,7 @@ import (
func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rgAudits, err := utilityRgAuditsCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@ -59,7 +60,6 @@ func dataSourceRgAuditsSchemaMake() map[string]*schema.Schema {
			Type:     schema.TypeInt,
			Required: true,
		},
		"items": {
			Type:     schema.TypeList,
			Computed: true,

@ -44,6 +44,7 @@ import (
func dataSourceRGResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	RGResourceConsumptionRec, err := utilityRGResourceConsumptionGetCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
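Each of the rg data source reads above now clears the resource ID before surfacing the error. A minimal sketch of that shared pattern follows; the helper names are illustrative, not the provider's actual functions:

// Sketch of the error-handling pattern applied across these reads: if the
// presence check fails, reset the ID so Terraform does not keep a
// half-populated entry in state, then return the error as a diagnostic.
func dataSourceRgExampleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	obj, err := utilityRgExampleCheckPresence(ctx, d, m) // assumed presence-check helper
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	d.SetId(strconv.Itoa(d.Get("rg_id").(int))) // RG-scoped data sources key off rg_id
	flattenRgExample(d, obj)                    // assumed flatten helper
	return nil
}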

@ -1,287 +1,280 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package rg

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rgList, err := utilityRgListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenRgList(rgList))
	d.Set("entry_count", rgList.EntryCount)

	return nil
}

func dataSourceRgListSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"by_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Find by ID",
		},
		"name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Find by name",
		},
		"account_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Find by account ID",
		},
		"account_name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Find by account name",
		},
		"created_after": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Find RGs created after specific time (unix timestamp)",
		},
		"created_before": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Find RGs created before specific time (unix timestamp)",
		},
		"status": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Find by status",
		},
		"lock_status": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Find by lock status",
		},
		"includedeleted": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     false,
			Description: "included deleted resource groups",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page number",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page size",
		},
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"account_acl": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: aclSchemaMake(),
						},
					},
					"account_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"account_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"created_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"created_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"def_net_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"def_net_type": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"deleted_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"deleted_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"desc": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"dirty": {
						Type:     schema.TypeBool,
						Computed: true,
					},
					"gid": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"guid": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"rg_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"lock_status": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"milestones": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"register_computes": {
						Type:     schema.TypeBool,
						Computed: true,
					},
					"resource_limits": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: resourceLimitsSchemaMake(),
						},
					},
					"secret": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"status": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"updated_by": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"updated_time": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"vins": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeInt,
						},
					},
					"vms": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeInt,
						},
					},
					"resource_types": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeString,
						},
					},
					"cpu_allocation_parameter": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"cpu_allocation_ratio": {
						Type:     schema.TypeFloat,
						Computed: true,
					},
					"uniq_pools": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeString,
						},
					},
				},
			},
		},
		"entry_count": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
	return res
}

func DataSourceRgList() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		ReadContext: dataSourceRgListRead,

		Timeouts: &schema.ResourceTimeout{
			Read:    &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},

		Schema: dataSourceRgListSchemaMake(),
	}
}

@ -44,6 +44,7 @@ import (
func dataSourceRgListComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listComputes, err := utilityRgListComputesCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

@ -44,6 +44,7 @@ import (
func dataSourceRgListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rgList, err := utilityRgListDeletedCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@ -122,12 +123,13 @@ func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
					Type:     schema.TypeString,
					Computed: true,
				},
				"cpu_allocation_parameter": {
					Type:     schema.TypeString,
					Computed: true,
				},
				"cpu_allocation_ratio": {
					Type:     schema.TypeFloat,
					Computed: true,
				},
				"created_by": {
					Type:     schema.TypeString,

@ -44,6 +44,7 @@ import (
func dataSourceRgListLbRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listLb, err := utilityRgListLbCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}
@ -307,6 +308,10 @@ func dataSourceRgListLbSchemaMake() map[string]*schema.Schema {
						Schema: aclSchemaMake(),
					},
				},
				"backend_haip": {
					Type:     schema.TypeString,
					Computed: true,
				},
				"backends": {
					Type:     schema.TypeList,
					Computed: true,
@ -342,6 +347,10 @@ func dataSourceRgListLbSchemaMake() map[string]*schema.Schema {
					Type:     schema.TypeInt,
					Computed: true,
				},
				"frontend_haip": {
					Type:     schema.TypeString,
					Computed: true,
				},
				"frontends": {
					Type:     schema.TypeList,
					Computed: true,

@ -44,6 +44,7 @@ import (
func dataSourceRgListPfwRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listPfw, err := utilityRgListPfwCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

Some files were not shown because too many files have changed in this diff.