main 1.0.1
asteam 6 months ago
parent 6f40af6a5f
commit 003e4d656e

@ -1,136 +1,8 @@
## Version 1.0.0
## Version 1.0.1
### Feature
#### account:
- Add data source dynamix_account_audits_list in cloudapi/account
- Add data source dynamix_account_computes_list in cloudapi/account
- Add data source dynamix_account_consumed_unit_by_type in cloudapi/account
- Add data source dynamix_account_consumed_units in cloudapi/account
- Add data source dynamix_account_disks_list in cloudapi/account
- Add data source dynamix_account_flipgroups_list in cloudapi/account
- Add data source dynamix_account_get_resource_consumption_list in cloudapi/account
- Add data source dynamix_account_get_resource_consumption in cloudapi/account
- Add data source dynamix_account_deleted_list in cloudapi/account
- Add data source dynamix_account_list in cloudapi/account
- Add data source dynamix_account_reserved_units in cloudapi/account
- Add data source dynamix_account_rg_list in cloudapi/account
- Add data source dynamix_account_templates_list in cloudapi/account
- Add data source dynamix_account_vins_list in cloudapi/account
- Add data source dynamix_account in cloudapi/account
- Add resource dynamix_account in cloudapi/account
#### bservice:
- Add data source dynamix_bservice_deleted_list in cloudapi/bservice
- Add data source dynamix_bservice_group in cloudapi/bservice
- Add data source dynamix_bservice_list in cloudapi/bservice
- Add data source dynamix_bservice_snapshot_list in cloudapi/bservice
- Add data source dynamix_bservice in cloudapi/bservice
- Add resource dynamix_bservice_group in cloudapi/bservice
- Add resource dynamix_bservice in cloudapi/bservice
#### disks:
- Add data source dynamix_disk in cloudapi/disks
- Add data source dynamix_disk_list in cloudapi/disks
- Add data source dynamix_disk_list_deleted in cloudapi/disks
- Add data source dynamix_disk_list_types in cloudapi/disks
- Add data source dynamix_disk_list_types_detailed in cloudapi/disks
- Add data source dynamix_disk_list_unattached in cloudapi/disks
- Add data source dynamix_disk_replication in cloudapi/disks
- Add data source dynamix_disk_snapshot in cloudapi/disks
- Add data source dynamix_disk_snapshot_list in cloudapi/disks
- Add resource dynamix_disk in cloudapi/disks
- Add resource dynamix_disk_replication in cloudapi/disks
- Add resource dynamix_disk_snapshot in cloudapi/disks
#### extnet:
- Add data source dynamix_extnet in cloudapi/extnet
- Add data source dynamix_extnet_computes_list in cloudapi/extnet
- Add data source dynamix_extnet_default in cloudapi/extnet
- Add data source dynamix_extnet_list in cloudapi/extnet
#### flipgroup:
- Add data source dynamix_flipgroup in cloudapi/flipgroup
- Add data source dynamix_flipgroup_list in cloudapi/flipgroup
- Add resource dynamix_flipgroup in cloudapi/flipgroup
#### image:
- Add data source dynamix_image in cloudapi/image
- Add data source dynamix_image_list in cloudapi/image
- Add resource dynamix_image in cloudapi/image
- Add resource dynamix_image_virtual in cloudapi/image
#### k8s:
- Add data source dynamix_k8ci_list in cloudapi/k8s
- Add data source dynamix_k8s in cloudapi/k8s
- Add data source dynamix_k8s_computes in cloudapi/k8s
- Add data source dynamix_k8s_list in cloudapi/k8s
- Add data source dynamix_k8s_list_deleted in cloudapi/k8s
- Add data source dynamix_k8s_wg in cloudapi/k8s
- Add data source dynamix_k8s_wg_cloud_init in cloudapi/k8s
- Add data source dynamix_k8s_wg_list in cloudapi/k8s
- Add resource dynamix_k8s_cp in cloudapi/k8s
- Add resource dynamix_k8s_wg in cloudapi/k8s
#### kvmvm:
- Add data source dynamix_kvmvm in cloudapi/kvmvm
- Add data source dynamix_kvmvm_audits in cloudapi/kvmvm
- Add data source dynamix_kvmvm_get_audits in cloudapi/kvmvm
- Add data source dynamix_kvmvm_get_console_url in cloudapi/kvmvm
- Add data source dynamix_kvmvm_get_log in cloudapi/kvmvm
- Add data source dynamix_kvmvm_list in cloudapi/kvmvm
- Add data source dynamix_kvmvm_list_deleted in cloudapi/kvmvm
- Add data source dynamix_kvmvm_pci_device_list in cloudapi/kvmvm
- Add data source dynamix_kvmvm_pfw_list in cloudapi/kvmvm
- Add data source dynamix_kvmvm_snapshot_usage in cloudapi/kvmvm
- Add data source dynamix_kvmvm_user_list in cloudapi/kvmvm
- Add data source dynamix_kvmvm_vgpu_list in cloudapi/kvmvm
- Add resource dynamix_kvmvm in cloudapi/kvmvm
#### lb:
- Add data source dynamix_lb_list in cloudapi/lb
- Add data source dynamix_lb_list_deleted in cloudapi/lb
- Add data source dynamix_lb in cloudapi/lb
- Add resource dynamix_lb_backend_server in cloudapi/lb
- Add resource dynamix_lb_backend in cloudapi/lb
- Add resource dynamix_lb_frontend_bind in cloudapi/lb
- Add resource dynamix_lb_frontend in cloudapi/lb
- Add resource dynamix_lb in cloudapi/lb
#### rg:
- Add data source dynamix_rg in cloudapi/rg
- Add data source dynamix_rg_affinity_group_computes in cloudapi/rg
- Add data source dynamix_rg_affinity_groups_get in cloudapi/rg
- Add data source dynamix_rg_affinity_groups_list in cloudapi/rg
- Add data source dynamix_rg_audits in cloudapi/rg
- Add data source dynamix_rg_resource_consumption_get in cloudapi/rg
- Add data source dynamix_rg_list in cloudapi/rg
- Add data source dynamix_rg_list_computes in cloudapi/rg
- Add data source dynamix_rg_list_deleted in cloudapi/rg
- Add data source dynamix_rg_list_lb in cloudapi/rg
- Add data source dynamix_rg_list_pfw in cloudapi/rg
- Add data source dynamix_rg_list_vins in cloudapi/rg
- Add data source dynamix_rg_resource_consumption_list in cloudapi/rg
- Add data source dynamix_rg_usage in cloudapi/rg
- Add resource dynamix_rg in cloudapi/rg
#### stack:
- Add data source dynamix_stack in cloudapi/stack
- Add data source dynamix_stack_list in cloudapi/stack
#### vfpool:
- Add data source dynamix_vfpool in cloudapi/vfpool
- Add data source dynamix_vfpool_list in cloudapi/vfpool
#### vins:
- Add data source dynamix_vins in cloudapi/vins
- Add data source dynamix_vins_audits in cloudapi/vins
- Add data source dynamix_vins_ext_net_list in cloudapi/vins
- Add data source dynamix_vins_ip_list in cloudapi/vins
- Add data source dynamix_vins_list in cloudapi/vins
- Add data source dynamix_vins_list_deleted in cloudapi/vins
- Add data source dynamix_vins_nat_rule_list in cloudapi/vins
- Add data source dynamix_vins_static_route in cloudapi/vins
- Add data source dynamix_vins_static_route_list in cloudapi/vins
- Add resource dynamix_vins in cloudapi/vins
- Add resource dynamix_vins_static_route in cloudapi/vins
### Bugfix
- Fixed the dynamix_lb_list data source in cloudapi/lb so that the optional field 'rg_id' is handled correctly
- Fixed reading configuration data from environment variables (see the sketch after this list)
- Fixed configuration validation
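
The environment-variable fix corresponds to the provider configuration now checking `IsNull()` instead of `IsUnknown()` before falling back to the `DYNAMIX_*` variables, as the config diff further below shows. A minimal sketch of that fallback pattern, assuming the terraform-plugin-framework `types` package; `resolveUser` is an illustrative helper, not a function from the provider itself:

```go
// Minimal sketch of the corrected env-var fallback, assuming the
// terraform-plugin-framework types package; resolveUser is an
// illustrative helper, not part of the provider code.
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

// resolveUser prefers the value set in the Terraform configuration and
// falls back to the DYNAMIX_USER environment variable when the attribute
// is null (omitted from the configuration), mirroring the IsUnknown ->
// IsNull change in this release.
func resolveUser(user types.String) string {
	if user.IsNull() {
		return os.Getenv("DYNAMIX_USER")
	}
	return user.ValueString()
}

func main() {
	os.Setenv("DYNAMIX_USER", "demo")
	fmt.Println(resolveUser(types.StringNull()))     // "demo" (from env)
	fmt.Println(resolveUser(types.StringValue("a"))) // "a" (from config)
}
```

With this check, an attribute left out of the Terraform configuration is read from the environment, while values set explicitly in the configuration take precedence.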

@ -8,7 +8,7 @@ ZIPDIR = ./zip
BINARY=${NAME}
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/dynamix/
VERSION=1.0.0
VERSION=1.0.1
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
FILES = ${BINARY}_${VERSION}_darwin_amd64\

@ -1,12 +1,12 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "dynamix_account_deleted_list Data Source - terraform-provider-dynamix"
page_title: "dynamix_account_list_deleted Data Source - terraform-provider-dynamix"
subcategory: ""
description: |-
---
# dynamix_account_deleted_list (Data Source)
# dynamix_account_list_deleted (Data Source)

@ -17,21 +17,21 @@ description: |-
### Required
- `authenticator` (String) Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.
- `controller_url` (String) URL of DECORT Cloud controller to use. API calls will be directed to this URL.
- `authenticator` (String) Authentication mode to use when connecting to DYNAMIX cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.
- `controller_url` (String) URL of DYNAMIX Cloud controller to use. API calls will be directed to this URL.
### Optional
- `allow_unverified_ssl` (Boolean) If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.
- `app_id` (String) Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.
- `app_secret` (String) Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.
- `bvs_password` (String) User password for DECORT cloud API operations in 'bvs' authentication mode.
- `bvs_user` (String) User name for DECORT cloud API operations in 'bvs' authentication mode.
- `domain` (String) User password for DECORT cloud API operations in 'bvs' authentication mode.
- `jwt` (String) JWT to access DECORT cloud API in 'jwt' authentication mode.
- `allow_unverified_ssl` (Boolean) If true, DYNAMIX API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.
- `app_id` (String) Application ID to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.
- `app_secret` (String) Application secret to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.
- `bvs_password` (String) User password for DYNAMIX cloud API operations in 'bvs' authentication mode.
- `bvs_user` (String) User name for DYNAMIX cloud API operations in 'bvs' authentication mode.
- `domain` (String) User password for DYNAMIX cloud API operations in 'bvs' authentication mode.
- `jwt` (String) JWT to access DYNAMIX cloud API in 'jwt' authentication mode.
- `oauth2_url` (String) OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.
- `password` (String) User password for DECORT cloud API operations in 'legacy' authentication mode.
- `password` (String) User password for DYNAMIX cloud API operations in 'legacy' authentication mode.
- `path_config` (String) The path of the configuration file entry.
- `path_token` (String) The path of the token file entry.
- `time_to_refresh` (Number) The number of minutes before the expiration of the token, a refresh will be made.
- `user` (String) User name for DECORT cloud API operations in 'legacy' authentication mode.
- `user` (String) User name for DYNAMIX cloud API operations in 'legacy' authentication mode.

@ -15,8 +15,14 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vfpool"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
cbaccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
cbAudit "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit"
cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
cbNode "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node"
cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg"
cbStack "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/stack"
cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool"
cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins"
)
func newDataSourcesMap() []func() datasource.DataSource {
@ -111,6 +117,9 @@ func newDataSourcesMap() []func() datasource.DataSource {
vfpool.NewDataSourceVFPool,
vfpool.NewDataSourceVFPoolList,
cbVFpool.NewDataSourceVFPool,
cbVFpool.NewDataSourceVFPoolList,
vins.NewDataSourceVINS,
vins.NewDataSourceVINSAudits,
vins.NewDataSourceVINSExtNetList,
@ -121,11 +130,56 @@ func newDataSourcesMap() []func() datasource.DataSource {
vins.NewDataSourceVINSStaticRoute,
vins.NewDataSourceVINSStaticRouteList,
cbaccount.NewDataSourceAccount,
cbaccount.NewDataSourceAccountList,
cbaccount.NewDataSourceAccountVinsList,
cbAccount.NewDataSourceAccount,
cbAccount.NewDataSourceAccountList,
cbAccount.NewDataSourceAccountVinsList,
cbAccount.NewDataSourceAccountAuditsList,
cbAccount.NewDataSourceAccountAvailableTemplatesListDataSource,
cbAccount.NewDataSourceAccountComputesList,
cbAccount.NewDataSourceAccountDisksList,
cbAccount.NewDataSourceAccountFlipgroupsList,
cbAccount.NewDataSourceAccountGetResourceConsumption,
cbAccount.NewDataSourceAccountListDeleted,
cbAccount.NewDataSourceAccountGetResourceConsumptionList,
cbAccount.NewDataSourceAccountRGList,
cbAudit.NewDataSourceAudit,
cbAudit.NewDataSourceAuditLinkedJobs,
cbAudit.NewDataSourceAuditList,
cbNode.NewDataSourceNode,
cbNode.NewDataSourceNodeList,
cbLb.NewDataSourceLB,
cbLb.NewDataSourceLBList,
cbLb.NewDataSourceLBListDeleted,
cbStack.NewDataSourceStack,
cbStack.NewDataSourceStackList,
cbVins.NewDataSourceVINS,
cbVins.NewDataSourceVINSAudits,
cbVins.NewDataSourceVINSExtNetList,
cbVins.NewDataSourceVINSIPList,
cbVins.NewDataSourceVINSList,
cbVins.NewDataSourceVINSListDeleted,
cbVins.NewDataSourceVINSNATRuleList,
cbVins.NewDataSourceVINSStaticRoute,
cbVins.NewDataSourceVINSStaticRouteList,
cbRG.NewDataSourceRG,
cbRG.NewDataSourceRGList,
cbRG.NewDataSourceRGListDeleted,
cbRG.NewDataSourceRGAffinityGroupComputes,
cbRG.NewDataSourceRGAffinityGroupsGet,
cbRG.NewDataSourceRGAffinityGroupsList,
cbRG.NewDataSourceRGAudits,
cbRG.NewDataSourceRGGetResourceConsumption,
cbRG.NewDataSourceRGResourceConsumptionList,
cbRG.NewDataSourceRGUsage,
cbRG.NewDataSourceRGListComputes,
cbRG.NewDataSourceRGListLB,
cbRG.NewDataSourceRGListPFW,
cbRG.NewDataSourceRGListVins,
}
}

@ -65,7 +65,7 @@ func (p *DynamixProvider) Schema(_ context.Context, _ provider.SchemaRequest, re
resp.Schema = schema.Schema{
Attributes: map[string]schema.Attribute{
"authenticator": schema.StringAttribute{
MarkdownDescription: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.",
MarkdownDescription: "Authentication mode to use when connecting to DYNAMIX cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.",
Required: true,
Validators: []validator.String{
stringvalidator.OneOfCaseInsensitive("decs3o", "legacy", "jwt", "bvs"), // ignore case while validating
@ -76,43 +76,43 @@ func (p *DynamixProvider) Schema(_ context.Context, _ provider.SchemaRequest, re
Optional: true,
},
"controller_url": schema.StringAttribute{
MarkdownDescription: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.",
MarkdownDescription: "URL of DYNAMIX Cloud controller to use. API calls will be directed to this URL.",
Required: true,
},
"user": schema.StringAttribute{
MarkdownDescription: "User name for DECORT cloud API operations in 'legacy' authentication mode.",
MarkdownDescription: "User name for DYNAMIX cloud API operations in 'legacy' authentication mode.",
Optional: true,
},
"password": schema.StringAttribute{
MarkdownDescription: "User password for DECORT cloud API operations in 'legacy' authentication mode.",
MarkdownDescription: "User password for DYNAMIX cloud API operations in 'legacy' authentication mode.",
Optional: true,
},
"bvs_user": schema.StringAttribute{
MarkdownDescription: "User name for DECORT cloud API operations in 'bvs' authentication mode.",
MarkdownDescription: "User name for DYNAMIX cloud API operations in 'bvs' authentication mode.",
Optional: true,
},
"bvs_password": schema.StringAttribute{
MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
MarkdownDescription: "User password for DYNAMIX cloud API operations in 'bvs' authentication mode.",
Optional: true,
},
"domain": schema.StringAttribute{
MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
MarkdownDescription: "User password for DYNAMIX cloud API operations in 'bvs' authentication mode.",
Optional: true,
},
"app_id": schema.StringAttribute{
MarkdownDescription: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
MarkdownDescription: "Application ID to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.",
Optional: true,
},
"app_secret": schema.StringAttribute{
MarkdownDescription: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
MarkdownDescription: "Application secret to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.",
Optional: true,
},
"jwt": schema.StringAttribute{
MarkdownDescription: "JWT to access DECORT cloud API in 'jwt' authentication mode.",
MarkdownDescription: "JWT to access DYNAMIX cloud API in 'jwt' authentication mode.",
Optional: true,
},
"allow_unverified_ssl": schema.BoolAttribute{
MarkdownDescription: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.",
MarkdownDescription: "If true, DYNAMIX API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.",
Optional: true,
},
"path_config": schema.StringAttribute{
@ -137,7 +137,8 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR
diags := req.Config.Get(ctx, &config)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
log.Debugf("Provider Configure error after req.Config.Get")
resp.Diagnostics.AddError("Provider Configure: ", "error after req.Config.Get")
log.Error("Provider Configure error after req.Config.Get")
return
}
@ -148,7 +149,8 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR
// Validate and set up authentication mode
mode, err := dynamixConfig.validateAuthenticator()
if err != nil {
log.Debug(err)
log.Error(err)
resp.Diagnostics.AddError("Provider Configure: validate error:", err.Error())
return
}

@ -6,7 +6,6 @@ import (
"os"
"strings"
log "github.com/sirupsen/logrus"
sdk_config "repository.basistech.ru/BASIS/decort-golang-sdk/config"
)
@ -38,82 +37,82 @@ type dynamixProviderConfig struct {
func (d *dynamixProviderConfig) new(config dynamixProviderModel) {
d.authenticator = strings.ToLower(config.Authenticator.ValueString())
if config.Oauth2Url.IsUnknown() {
d.oauth2Url = os.Getenv("DECORT_OAUTH2_URL")
if config.Oauth2Url.IsNull() {
d.oauth2Url = os.Getenv("DYNAMIX_OAUTH2_URL")
} else {
d.oauth2Url = config.Oauth2Url.ValueString()
}
d.oauth2Url = strings.ToLower(d.oauth2Url)
if config.ControllerUrl.IsNull() {
d.controllerUrl = os.Getenv("DYNAMIX_CONTROLLER_URL")
} else {
d.controllerUrl = strings.ToLower(config.ControllerUrl.ValueString())
if d.controllerUrl == "" {
log.Debugf("empty DECORT cloud controller URL provided")
return
}
if config.User.IsUnknown() {
d.user = os.Getenv("DECORT_USER")
if config.User.IsNull() {
d.user = os.Getenv("DYNAMIX_USER")
} else {
d.user = config.User.ValueString()
}
if config.Password.IsUnknown() {
d.password = os.Getenv("DECORT_PASSWORD")
if config.Password.IsNull() {
d.password = os.Getenv("DYNAMIX_PASSWORD")
} else {
d.password = config.Password.ValueString()
}
if config.BvsUser.IsUnknown() {
d.bvsUser = os.Getenv("DECORT_BVS_USER")
if config.BvsUser.IsNull() {
d.bvsUser = os.Getenv("DYNAMIX_BVS_USER")
} else {
d.bvsUser = config.BvsUser.ValueString()
}
if config.BvsPassword.IsUnknown() {
d.bvsPassword = os.Getenv("DECORT_BVS_PASSWORD")
if config.BvsPassword.IsNull() {
d.bvsPassword = os.Getenv("DYNAMIX_BVS_PASSWORD")
} else {
d.bvsPassword = config.BvsPassword.ValueString()
}
if config.Domain.IsUnknown() {
d.domain = os.Getenv("DECORT_DOMAIN")
if config.Domain.IsNull() {
d.domain = os.Getenv("DYNAMIX_DOMAIN")
} else {
d.domain = config.Domain.ValueString()
}
if config.AppId.IsUnknown() {
d.appId = os.Getenv("DECORT_APP_ID")
if config.AppId.IsNull() {
d.appId = os.Getenv("DYNAMIX_APP_ID")
} else {
d.appId = config.AppId.ValueString()
}
if config.AppSecret.IsUnknown() {
d.appSecret = os.Getenv("DECORT_APP_SECRET")
if config.AppSecret.IsNull() {
d.appSecret = os.Getenv("DYNAMIX_APP_SECRET")
} else {
d.appSecret = config.AppSecret.ValueString()
}
if config.Jwt.IsUnknown() {
d.jwt = os.Getenv("DECORT_JWT")
if config.Jwt.IsNull() {
d.jwt = os.Getenv("DYNAMIX_JWT")
} else {
d.jwt = config.Jwt.ValueString()
}
if config.AllowUnverifiedSsl.IsUnknown() {
if config.AllowUnverifiedSsl.IsNull() {
d.allowUnverifiedSsl = false // default false
} else {
d.allowUnverifiedSsl = config.AllowUnverifiedSsl.ValueBool()
}
if !config.PathConfig.IsUnknown() {
if !config.PathConfig.IsNull() {
d.pathConfig = config.PathConfig.ValueString()
}
if !config.PathToken.IsUnknown() {
if !config.PathToken.IsNull() {
d.pathToken = config.PathToken.ValueString()
}
if !config.TimeToRefresh.IsUnknown() {
if !config.TimeToRefresh.IsNull() {
d.timeToRefresh = config.TimeToRefresh.ValueInt64()
}
@ -125,6 +124,12 @@ func (d *dynamixProviderConfig) new(config dynamixProviderModel) {
// reason, the method will return mode = MODE_UNDEF and error.
func (d *dynamixProviderConfig) validateAuthenticator() (int, error) {
var mode = MODE_UNDEF
if d.oauth2Url == "" {
return mode, fmt.Errorf("OAuth2 URL is required")
}
if d.controllerUrl == "" {
return mode, fmt.Errorf("controllerURL is required")
}
switch d.authenticator {
case "jwt":
if d.jwt == "" {
@ -132,9 +137,6 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) {
}
mode = MODE_JWT
case "decs3o":
if d.oauth2Url == "" {
return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no OAuth2 URL provided")
}
if d.appId == "" {
return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no Application ID provided")
}
@ -149,6 +151,12 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) {
if d.password == "" {
return mode, fmt.Errorf("authenticator mode 'legacy' specified but no password provided")
}
if d.appId == "" {
return mode, fmt.Errorf("authenticator mode 'legacy' specified but no Application ID provided")
}
if d.appSecret == "" {
return mode, fmt.Errorf("authenticator mode 'legacy' specified but no Secret ID provided")
}
mode = MODE_LEGACY
case "bvs":
if d.bvsUser == "" {
@ -157,9 +165,6 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) {
if d.bvsPassword == "" {
return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs password provided")
}
if d.oauth2Url == "" {
return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs URL provided")
}
if d.appId == "" {
return mode, fmt.Errorf("authenticator mode 'bvs' specified but no Application ID provided")
}

@ -12,6 +12,10 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool"
cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins"
)
func newResourcesMap() []func() resource.Resource {
@ -36,5 +40,16 @@ func newResourcesMap() []func() resource.Resource {
vins.NewResourceVINSStaticRoute,
bservice.NewResourceBService,
bservice.NewResourceBServiceGroup,
cbAccount.NewResourceAccount,
cbLb.NewResourceLB,
cbLb.NewResourceLBBackend,
cbLb.NewResourceLBBackendServer,
cbLb.NewResourceLBFrontend,
cbLb.NewResourceLBFrontendBind,
cbVFpool.NewResourceVFPool,
cbVins.NewResourceVINS,
cbVins.NewResourceVINSStaticRoute,
}
}

@ -77,7 +77,7 @@ func (d *dataSourceAccountListDeleted) Schema(ctx context.Context, _ datasource.
}
func (d *dataSourceAccountListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_account_deleted_list"
resp.TypeName = req.ProviderTypeName + "_account_list_deleted"
}
// Configure adds the provider configured client to the data source.

@ -41,7 +41,7 @@ type resourceAccount struct {
func (r *resourceAccount) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
resp.Diagnostics.AddError(
"Only users with admin privileges are able to create accounts. Contact your platform administrator or import existing account.",
"Use 'terraform import basis_account.<NAME> <ID>' command to import existing account configuration",
"Use 'terraform import dynamix_account.<NAME> <ID>' command to import existing account configuration",
)
return
}

@ -14,7 +14,7 @@ func AccountDataSourceCheckPresence(ctx context.Context, accountId uint64, c *de
recordAccount, err := c.CloudAPI().Account().Get(ctx, account.GetRequest{AccountID: accountId})
if err != nil {
return nil, fmt.Errorf("cannot get info about extnet with error: %w", err)
return nil, fmt.Errorf("cannot get info about account with error: %w", err)
}
tflog.Info(ctx, "AccountDataSourceCheckPresence: response from CloudAPI().Account().Get",

@ -24,7 +24,7 @@ func LBListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceL
listLBReq.AccountID = uint64(plan.AccountID.ValueInt64())
}
if !plan.RgID.IsNull() {
listLBReq.RGID = uint64(plan.ByID.ValueInt64())
listLBReq.RGID = uint64(plan.RgID.ValueInt64())
}
if !plan.TechStatus.IsNull() {
listLBReq.TechStatus = plan.TechStatus.ValueString()

@ -113,7 +113,7 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute {
Required: true,
Description: "type of the reservation",
Validators: []validator.String{
stringvalidator.OneOfCaseInsensitive("DHCP", "VIP", "EXCLUDE"), // case is ignored
stringvalidator.OneOfCaseInsensitive("DHCP", "VIP", "EXCLUDED"), // case is ignored
},
},
"ip_addr": schema.StringAttribute{

@ -47,7 +47,7 @@ func VINSListDeletedDataSourceCheckPresence(ctx context.Context, plan *models.Da
tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: before call CloudAPI().VINS().ListDeleted", map[string]any{"req": listReq})
list, err := c.CloudAPI().VINS().ListDeleted(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about vins list", err.Error())
diags.AddError("Cannot get info about vins deleted list", err.Error())
return nil, diags
}
tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: successfull response from CloudAPI().VINS().ListDeleted")

@ -0,0 +1,91 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountAuditsList{}
)
func NewDataSourceAccountAuditsList() datasource.DataSource {
return &dataSourceAccountAuditsList{}
}
// dataSourceAccountAuditsList is the data source implementation.
type dataSourceAccountAuditsList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountAuditsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountAuditsListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error get state")
return
}
accountId := uint64(state.AccountID.ValueInt64())
tflog.Info(ctx, "Read dataSourceAccountAuditsList: got state successfully", map[string]any{"account_id": accountId})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountAuditsList: set timeouts successfully", map[string]any{
"account_id": accountId,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountAuditsDataSourceList(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountAuditsList", map[string]any{"account_id": accountId})
}
func (d *dataSourceAccountAuditsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountAuditsList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountAuditsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_audits_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountAuditsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountAuditsList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountAuditsList successfully")
}

@ -0,0 +1,91 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountAvailableTemplatesListDataSource{}
)
func NewDataSourceAccountAvailableTemplatesListDataSource() datasource.DataSource {
return &dataSourceAccountAvailableTemplatesListDataSource{}
}
// dataSourceAccountAvailableTemplatesListDataSource is the data source implementation.
type dataSourceAccountAvailableTemplatesListDataSource struct {
client *decort.DecortClient
}
func (d *dataSourceAccountAvailableTemplatesListDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountAvailableTemplatesListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error get state")
return
}
accountID := uint64(state.AccountID.ValueInt64())
tflog.Info(ctx, "Read DataSourceAccountAvailableTemplatesListModel: got state successfully", map[string]any{"account_id": accountID})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error set timeout")
return
}
tflog.Info(ctx, "Read DataSourceAccountAvailableTemplatesListModel: set timeouts successfully", map[string]any{
"account_id": accountID,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountAvailableTemplatesListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error flatten data source AccountAvailableTemplatesListDataSource")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error set state")
return
}
tflog.Info(ctx, "End read DataSourceAccountAvailableTemplatesListModel", map[string]any{"account_id": accountID})
}
func (d *dataSourceAccountAvailableTemplatesListDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountAvailableTemplatesList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountAvailableTemplatesListDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_available_templates_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountAvailableTemplatesListDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure DataSourceAccountAvailableTemplatesListModel")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure DataSourceAccountAvailableTemplatesListModel successfully")
}

@ -0,0 +1,88 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountComputesList{}
)
func NewDataSourceAccountComputesList() datasource.DataSource {
return &dataSourceAccountComputesList{}
}
// dataSourceAccountComputesList is the data source implementation.
type dataSourceAccountComputesList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountComputesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.ListComputesModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountComputesList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountComputesList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountComputesList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountComputesListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountComputesList: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountComputesList")
}
func (d *dataSourceAccountComputesList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountComputesList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountComputesList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_computes_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountComputesList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountComputesList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountComputesList successfully")
}

@ -0,0 +1,88 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountDisksList{}
)
func NewDataSourceAccountDisksList() datasource.DataSource {
return &dataSourceAccountDisksList{}
}
// dataSourceAccountDisksList is the data source implementation.
type dataSourceAccountDisksList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountDisksList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountDisksListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountDisksList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountDisksList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountDisksList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountDisksListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountDisksList: Error flatten data source account disks list")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountDisksList")
}
func (d *dataSourceAccountDisksList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountDisksList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountDisksList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_disks_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountDisksList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountDisksList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountDisksList successfully")
}

@ -0,0 +1,88 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountFlipgroupsList{}
)
func NewDataSourceAccountFlipgroupsList() datasource.DataSource {
return &dataSourceAccountFlipgroupsList{}
}
// dataSourceAccountFlipgroupsList is the data source implementation.
type dataSourceAccountFlipgroupsList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountFlipgroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountFlipgroupsListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountFlipgroupsListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountFlipgroupsList")
}
func (d *dataSourceAccountFlipgroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountFlipgroupsList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountFlipgroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_flipgroups_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountFlipgroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountFlipgroupsList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountFlipgroupsList successfully")
}

@ -0,0 +1,91 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountGetResourceConsumption{}
)
func NewDataSourceAccountGetResourceConsumption() datasource.DataSource {
return &dataSourceAccountGetResourceConsumption{}
}
// dataSourceAccountGetResourceConsumption is the data source implementation.
type dataSourceAccountGetResourceConsumption struct {
client *decort.DecortClient
}
func (d *dataSourceAccountGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.AccountGetResourceConsumptionModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error get state")
return
}
accountId := uint64(state.AccountID.ValueInt64())
tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: got state successfully", map[string]any{"account_id": accountId})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: set timeouts successfully", map[string]any{
"account_id": accountId,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error flatten data source account")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumption", map[string]any{"account_id": accountId})
}
func (d *dataSourceAccountGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountGetResourceConsumption(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_resource_consumption_get"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumption")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumption successfully")
}

@ -0,0 +1,88 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountListDeleted{}
)
func NewDataSourceAccountListDeleted() datasource.DataSource {
return &dataSourceAccountListDeleted{}
}
// dataSourceAccountListDeleted is the data source implementation.
type dataSourceAccountListDeleted struct {
client *decort.DecortClient
}
func (d *dataSourceAccountListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountListDeletedModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountListDeleted: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountListDeletedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountListDeleted")
}
func (d *dataSourceAccountListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountListDeleted(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_list_deleted"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountListDeleted")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountListDeleted successfully")
}

@ -0,0 +1,89 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountGetResourceConsumptionList{}
)
func NewDataSourceAccountGetResourceConsumptionList() datasource.DataSource {
return &dataSourceAccountGetResourceConsumptionList{}
}
// dataSourceAccountGetResourceConsumptionList is the data source implementation.
type dataSourceAccountGetResourceConsumptionList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountGetResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.AccountGetResourceConsumptionListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionList(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error flatten data source account")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumptionList")
}
func (d *dataSourceAccountGetResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountGetResourceListConsumption(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountGetResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_resource_consumption_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountGetResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumptionList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumptionList successfully")
}

@ -0,0 +1,88 @@
package cbAccount
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAccountRGList{}
)
func NewDataSourceAccountRGList() datasource.DataSource {
return &dataSourceAccountRGList{}
}
// dataSourceAccountRGList is the data source implementation.
type dataSourceAccountRGList struct {
client *decort.DecortClient
}
func (d *dataSourceAccountRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAccountRGListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountRGList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAccountRGList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountRGList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAccountRGList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AccountRGListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountRGList: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAccountRGList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAccountRGList")
}
func (d *dataSourceAccountRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountRGList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAccountRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account_rg_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAccountRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAccountRGList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAccountRGList successfully")
}
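
A data source such as dataSourceAccountRGList only becomes visible to practitioners after its constructor is registered in the provider's DataSources list. The sketch below shows that wiring under stated assumptions: the provider struct name dynamixProvider and the import path of the cbAccount package are inferred, not taken from this diff; only NewDataSourceAccountRGList comes from the code above.

// Hedged sketch: registering the new cloudbroker account data sources.
// The provider struct name and package location are assumptions.
package provider

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/datasource"

	cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
)

type dynamixProvider struct{} // assumption: the real provider type also carries configuration

// DataSources returns the constructors the framework calls to instantiate each data source.
func (p *dynamixProvider) DataSources(_ context.Context) []func() datasource.DataSource {
	return []func() datasource.DataSource{
		cbAccount.NewDataSourceAccountRGList, // exposed as <provider>_cb_account_rg_list
		// ...the other cloudbroker account data sources are registered the same way
	}
}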

@ -0,0 +1,59 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountAuditsDataSourceList flattens data source for account audits.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountAuditsDataSourceList(ctx context.Context, state *models.DataSourceAccountAuditsListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountAuditsDataSourceList")
diags := diag.Diagnostics{}
accountId := uint64(state.AccountID.ValueInt64())
auditsList, err := utilities.AccountAuditsListDataSourceCheckPresence(ctx, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about account audits with account ID %v", accountId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: before flatten", map[string]any{"account_id": accountId})
id := uuid.New()
*state = models.DataSourceAccountAuditsListModel{
AccountID: state.AccountID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
}
items := make([]models.ItemAuditModel, 0, len(*auditsList))
for _, item := range *auditsList {
i := models.ItemAuditModel{
Call: types.StringValue(item.Call),
ResponseTime: types.Float64Value(item.ResponseTime),
StatusCode: types.Int64Value(int64(item.StatusCode)),
Timestamp: types.Float64Value(item.Timestamp),
User: types.StringValue(item.User),
}
items = append(items, i)
}
state.Items = items
tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: after flatten", map[string]any{"account_id": state.Id.ValueString()})
tflog.Info(ctx, "End flattens.AccountAuditsDataSourceList", map[string]any{"account_id": state.Id.ValueString()})
return nil
}

@ -0,0 +1,41 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountAvailableTemplatesListDataSource flattens data source for templates list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountAvailableTemplatesListDataSource(ctx context.Context, state *models.DataSourceAccountAvailableTemplatesListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountAvailableTemplatesListDataSource")
diags := diag.Diagnostics{}
templatesList, err := utilities.AccountAvailableTemplatesListDataSourceCheckPresence(ctx, uint64(state.AccountID.ValueInt64()), c)
if err != nil {
diags.AddError("Cannot get info about templates list", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountAvailableTemplatesListDataSource: before flatten")
*state = models.DataSourceAccountAvailableTemplatesListModel{
AccountID: state.AccountID,
Timeouts: state.Timeouts,
Items: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, templatesList),
}
tflog.Info(ctx, "flattens.AccountAvailableTemplatesListDataSource: after flatten")
tflog.Info(ctx, "End flattens.AccountAvailableTemplatesListDataSource")
return nil
}
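
Items here is produced by the internal helper flattens.FlattenSimpleTypeToList, whose implementation is not part of this diff. A plausible minimal equivalent built on the framework's own converter is sketched below; treat it as an assumption about the helper's behavior, not the project's actual code.

package flattens

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
)

// FlattenSimpleTypeToList is assumed to wrap types.ListValueFrom and log any
// conversion diagnostics; the real helper in internal/flattens may differ.
func FlattenSimpleTypeToList(ctx context.Context, elemType attr.Type, elements any) types.List {
	res, diags := types.ListValueFrom(ctx, elemType, elements)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("FlattenSimpleTypeToList: cannot flatten elements to list ", diags))
	}
	return res
}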

@ -0,0 +1,81 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountComputesListDataSource flattens data source for account computes list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountComputesListDataSource(ctx context.Context, state *models.ListComputesModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountComputesListDataSource")
diags := diag.Diagnostics{}
cList, err := utilities.AccountComputesListDataSourceCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about computes list", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountComputesListDataSource: before flatten")
*state = models.ListComputesModel{
AccountID: state.AccountID,
Name: state.Name,
RGName: state.RGName,
RGID: state.RGID,
TechStatus: state.TechStatus,
IpAddress: state.IpAddress,
ExtNetName: state.ExtNetName,
ExtNetID: state.ExtNetID,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
EntryCount: types.Int64Value(int64(cList.EntryCount)),
}
data := make([]models.ItemComputeModel, 0, len(cList.Data))
for _, v := range cList.Data {
item := models.ItemComputeModel{
AccountID: types.Int64Value(int64(v.AccountID)),
AccountName: types.StringValue(v.AccountName),
CPUs: types.Int64Value(int64(v.CPUs)),
CreatedBy: types.StringValue(v.CreatedBy),
CreatedTime: types.Int64Value(int64(v.CreatedTime)),
DeletedBy: types.StringValue(v.DeletedBy),
DeletedTime: types.Int64Value(int64(v.DeletedTime)),
ID: types.Int64Value(int64(v.ID)),
Name: types.StringValue(v.Name),
RAM: types.Int64Value(int64(v.RAM)),
Registered: types.BoolValue(v.Registered),
RGID: types.Int64Value(int64(v.RGID)),
RgName: types.StringValue(v.RgName),
Status: types.StringValue(v.Status),
TechStatus: types.StringValue(v.TechStatus),
TotalDisksSize: types.Int64Value(int64(v.TotalDisksSize)),
UpdatedBy: types.StringValue(v.UpdatedBy),
UpdatedTime: types.Int64Value(int64(v.UpdatedTime)),
UserManaged: types.BoolValue(v.UserManaged),
VINSConnected: types.Int64Value(int64(v.VINSConnected)),
}
data = append(data, item)
}
state.Items = data
tflog.Info(ctx, "flattens.AccountComputesListDataSource: after flatten")
tflog.Info(ctx, "End flattens.AccountComputesListDataSource")
return nil
}

@ -0,0 +1,66 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountDisksListDataSource flattens data source for account disks list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountDisksListDataSource(ctx context.Context, state *models.DataSourceAccountDisksListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountDisksListDataSource")
diags := diag.Diagnostics{}
disksList, err := utilities.AccountDisksListCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("flattens.AccountDisksListDataSource: Cannot get info", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountDisksListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceAccountDisksListModel{
AccountID: state.AccountID,
DiskID: state.DiskID,
Name: state.Name,
DiskMaxSize: state.DiskMaxSize,
Type: state.Type,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
SortBy: state.SortBy,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(disksList.EntryCount)),
}
items := make([]models.ItemDiskModel, 0, len(disksList.Data))
for _, item := range disksList.Data {
i := models.ItemDiskModel{
DiskID: types.Int64Value(int64(item.ID)),
DiskName: types.StringValue(item.Name),
Pool: types.StringValue(item.Pool),
SEPID: types.Int64Value(int64(item.SepID)),
Shareable: types.BoolValue(item.Shareable),
SizeMax: types.Int64Value(int64(item.SizeMax)),
Type: types.StringValue(item.Type),
}
items = append(items, i)
}
state.Items = items
tflog.Info(ctx, "flattens.AccountDisksListDataSource: after flatten")
tflog.Info(ctx, "End flattens.AccountDisksListDataSource")
return nil
}

@ -0,0 +1,81 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountFlipgroupsListDataSource flattens data source for account flipgroups list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountFlipgroupsListDataSource(ctx context.Context, state *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountFlipgroupsListDataSource")
diags := diag.Diagnostics{}
flipgroups, err := utilities.AccountFlipgroupsListCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about account flipgroups list", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountFlipgroupsListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceAccountFlipgroupsListModel{
AccountID: state.AccountID,
Name: state.Name,
VINSID: state.VINSID,
VINSName: state.VINSName,
ExtNetID: state.ExtNetID,
ByIP: state.ByIP,
FLIPGroupID: state.FLIPGroupID,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(flipgroups.EntryCount)),
}
items := make([]models.ItemAccountFlipgroupModel, 0, len(flipgroups.Data))
for _, item := range flipgroups.Data {
i := models.ItemAccountFlipgroupModel{
AccountID: types.Int64Value(int64(item.AccountID)),
ClientType: types.StringValue(item.ClientType),
ConnType: types.StringValue(item.ConnType),
CreatedBy: types.StringValue(item.CreatedBy),
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
DefaultGW: types.StringValue(item.DefaultGW),
DeletedBy: types.StringValue(item.DeletedBy),
DeletedTime: types.Int64Value(int64(item.DeletedTime)),
Description: types.StringValue(item.Description),
GID: types.Int64Value(int64(item.GID)),
GUID: types.Int64Value(int64(item.GUID)),
ID: types.Int64Value(int64(item.ID)),
IP: types.StringValue(item.IP),
Milestones: types.Int64Value(int64(item.Milestones)),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetType: types.StringValue(item.NetType),
NetMask: types.Int64Value(int64(item.Netmask)),
Status: types.StringValue(item.Status),
UpdatedBy: types.StringValue(item.UpdatedBy),
UpdatedTime: types.Int64Value(int64(item.UpdatedTime)),
}
items = append(items, i)
}
state.Items = items
tflog.Info(ctx, "End flattens.AccountFlipgroupsListDataSource")
return nil
}

@ -0,0 +1,91 @@
package flattens
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountGetResourceConsumptionDataSource flattens data source for account resource consumption.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountGetResourceConsumptionDataSource(ctx context.Context, state *models.AccountGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionDataSource")
diags := diag.Diagnostics{}
accountId := uint64(state.AccountID.ValueInt64())
record, err := utilities.AccountGetResourceConsumptionDataSourceCheckPresence(ctx, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: before flatten", map[string]any{"account_id": accountId, "record": record})
*state = models.AccountGetResourceConsumptionModel{
AccountID: state.AccountID,
Timeouts: state.Timeouts,
Consumed: &models.ResourceConsumptionModel{
CPU: types.Int64Value(record.Consumed.CPU),
DiskSize: types.Float64Value(record.Consumed.DiskSize),
DiskSizeMax: types.Float64Value(record.Consumed.DiskSizeMax),
ExtIPs: types.Int64Value(record.Consumed.ExtIPs),
ExtTraffic: types.Int64Value(record.Consumed.ExtTraffic),
GPU: types.Int64Value(record.Consumed.GPU),
RAM: types.Int64Value(record.Consumed.RAM),
SEPs: flattenResourceConsumptionSep(ctx, record.Consumed.SEPs),
},
Limits: &models.ResourceConsumptionLimitsModel{
CUC: types.Float64Value(record.ResourceLimits.CuC),
CUD: types.Float64Value(record.ResourceLimits.CuD),
CUI: types.Float64Value(record.ResourceLimits.CuI),
CUM: types.Float64Value(record.ResourceLimits.CuM),
CUDM: types.Float64Value(record.ResourceLimits.CuDM),
CUNP: types.Float64Value(record.ResourceLimits.CuNP),
GPUUnits: types.Float64Value(record.ResourceLimits.GPUUnits),
},
Reserved: &models.ResourceConsumptionModel{
CPU: types.Int64Value(record.Reserved.CPU),
DiskSize: types.Float64Value(record.Reserved.DiskSize),
DiskSizeMax: types.Float64Value(record.Reserved.DiskSizeMax),
ExtIPs: types.Int64Value(record.Reserved.ExtIPs),
ExtTraffic: types.Int64Value(record.Reserved.ExtTraffic),
GPU: types.Int64Value(record.Reserved.GPU),
RAM: types.Int64Value(record.Reserved.RAM),
SEPs: flattenResourceConsumptionSep(ctx, record.Reserved.SEPs),
},
}
tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: after flatten", map[string]any{"account_id": state.AccountID.ValueInt64()})
tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionDataSource", map[string]any{"account_id": state.AccountID.ValueInt64()})
return nil
}
func flattenResourceConsumptionSep(ctx context.Context, seps map[string]map[string]account.DiskUsage) []models.ResourceConsumptionSepModel {
tflog.Info(ctx, "Start flattenResourceConsumptionSep")
res := make([]models.ResourceConsumptionSepModel, 0, len(seps))
for sepId := range seps {
for poolName, diskData := range seps[sepId] {
s := models.ResourceConsumptionSepModel{
SepID: types.StringValue(sepId),
PoolName: types.StringValue(poolName),
DiskSize: types.Float64Value(diskData.DiskSize),
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
}
res = append(res, s)
}
}
tflog.Info(ctx, "End flattenResourceConsumptionSep")
return res
}
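
flattenResourceConsumptionSep walks a two-level map (SEP id, then pool name) and emits one flat row per pool. The standalone sketch below reproduces that traversal with a local stand-in for the SDK's account.DiskUsage so the resulting shape is easy to see; note that Go map iteration order is unspecified, so row order is not stable.

package main

import "fmt"

// diskUsage is a local stand-in for the SDK's account.DiskUsage.
type diskUsage struct {
	DiskSize    float64
	DiskSizeMax float64
}

// sepRow mirrors ResourceConsumptionSepModel without the framework value types.
type sepRow struct {
	SepID       string
	PoolName    string
	DiskSize    float64
	DiskSizeMax float64
}

// flattenSeps turns "sep id -> pool name -> usage" into one row per pool.
func flattenSeps(seps map[string]map[string]diskUsage) []sepRow {
	res := make([]sepRow, 0, len(seps))
	for sepID, pools := range seps {
		for pool, usage := range pools {
			res = append(res, sepRow{sepID, pool, usage.DiskSize, usage.DiskSizeMax})
		}
	}
	return res
}

func main() {
	seps := map[string]map[string]diskUsage{
		"1": {"data01": {DiskSize: 10, DiskSizeMax: 25}},
		"2": {"data01": {DiskSize: 5, DiskSizeMax: 50}, "data02": {DiskSize: 1, DiskSizeMax: 5}},
	}
	for _, row := range flattenSeps(seps) {
		fmt.Printf("%+v\n", row)
	}
}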

@ -0,0 +1,104 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountListDeletedDataSource flattens data source for account list deleted.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountListDeletedDataSource(ctx context.Context, state *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountListDeletedDataSource")
diags := diag.Diagnostics{}
accListDel, err := utilities.AccountListDeletedCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about account list deleted", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountListDeletedDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceAccountListDeletedModel{
ByID: state.ByID,
Name: state.Name,
ACL: state.ACL,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
SortBy: state.SortBy,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(accListDel.EntryCount)),
}
items := make([]models.ItemAccountListDeletedModel, 0, len(accListDel.Data))
for _, item := range accListDel.Data {
i := models.ItemAccountListDeletedModel{
DCLocation: types.StringValue(item.DCLocation),
Ckey: types.StringValue(item.CKey),
Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.Meta),
ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.ComputeFeatures),
Company: types.StringValue(item.Company),
Companyurl: types.StringValue(item.CompanyURL),
CpuAllocationParameter: types.StringValue(item.CPUAllocationParameter),
CpuAllocationRatio: types.Float64Value(float64(item.CPUAllocationRatio)),
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
CreatedBy: types.StringValue(item.CreatedBy),
DeletedTime: types.Int64Value(int64(item.DeletedTime)),
DeletedBy: types.StringValue(item.DeletedBy),
AccountID: types.Int64Value(int64(item.ID)),
GUID: types.Int64Value(int64(item.GUID)),
AccountName: types.StringValue(item.Name),
Status: types.StringValue(item.Status),
UpdatedTime: types.Int64Value(int64(item.UpdatedTime)),
ResourceLimits: &models.AccountLimitsModel{
CUC: types.Float64Value(float64(item.ResourceLimits.CuC)),
CUD: types.Float64Value(float64(item.ResourceLimits.CuD)),
CUI: types.Float64Value(float64(item.ResourceLimits.CuI)),
CUM: types.Float64Value(float64(item.ResourceLimits.CuM)),
CUDM: types.Float64Value(float64(item.ResourceLimits.CuDM)),
CUNP: types.Float64Value(float64(item.ResourceLimits.CuNP)),
GPUUnits: types.Float64Value(float64(item.ResourceLimits.GPUUnits)),
},
ResourceTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.ResTypes),
SendAccessEmails: types.BoolValue(item.SendAccessEmails),
UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.UniqPools),
Version: types.Int64Value(int64(item.Version)),
Vins: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VINS),
}
aclList := make([]models.RecordACLModel, 0, len(item.ACL))
for _, acl := range item.ACL {
a := models.RecordACLModel{
Explicit: types.BoolValue(acl.Explicit),
GUID: types.StringValue(acl.GUID),
Right: types.StringValue(acl.Right),
Status: types.StringValue(acl.Status),
Type: types.StringValue(acl.Type),
UserGroupID: types.StringValue(acl.UserGroupID),
}
aclList = append(aclList, a)
}
i.ACL = aclList
items = append(items, i)
}
state.Items = items
tflog.Info(ctx, "flattens.AccountListDeletedDataSource: after flatten")
tflog.Info(ctx, "End flattens.AccountListDeletedDataSource")
return nil
}

@ -0,0 +1,97 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountGetResourceConsumptionList flattens data source for account resource consumption list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountGetResourceConsumptionList(ctx context.Context, state *models.AccountGetResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionList")
diags := diag.Diagnostics{}
resConsList, err := utilities.AccountGetResourceConsumptionListDataSourceCheckPresence(ctx, c)
if err != nil {
diags.AddError("Cannot get info about resource consumptions", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: before flatten")
*state = models.AccountGetResourceConsumptionListModel{
EntryCount: state.EntryCount,
Timeouts: state.Timeouts,
}
items := make([]models.AccountGetResourceConsumptionListItemModel, 0, len(resConsList.Data))
for _, resConsItem := range resConsList.Data {
item := models.AccountGetResourceConsumptionListItemModel{
AccountId: types.Int64Value(int64(resConsItem.AccountID)),
Consumed: &models.ResourceConsumptionListModel{
CPU: types.Int64Value(resConsItem.Consumed.CPU),
DiskSize: types.Float64Value(resConsItem.Consumed.DiskSize),
DiskSizeMax: types.Float64Value(resConsItem.Consumed.DiskSizeMax),
ExtIPs: types.Int64Value(resConsItem.Consumed.ExtIPs),
ExtTraffic: types.Int64Value(resConsItem.Consumed.ExtTraffic),
GPU: types.Int64Value(resConsItem.Consumed.GPU),
RAM: types.Int64Value(resConsItem.Consumed.RAM),
},
Reserved: &models.ResourceConsumptionListModel{
CPU: types.Int64Value(resConsItem.Reserved.CPU),
DiskSize: types.Float64Value(resConsItem.Reserved.DiskSize),
DiskSizeMax: types.Float64Value(resConsItem.Reserved.DiskSizeMax),
ExtIPs: types.Int64Value(resConsItem.Reserved.ExtIPs),
ExtTraffic: types.Int64Value(resConsItem.Reserved.ExtTraffic),
GPU: types.Int64Value(resConsItem.Reserved.GPU),
RAM: types.Int64Value(resConsItem.Reserved.RAM),
},
}
sepsConsumed := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Consumed.SEPs))
for sepId, data := range resConsItem.Consumed.SEPs {
for dataName, diskData := range data {
sepItem := models.ResourceConsumptionSepListModel{
SepID: types.StringValue(sepId),
PoolName: types.StringValue(dataName),
DiskSize: types.Float64Value(diskData.DiskSize),
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
}
sepsConsumed = append(sepsConsumed, sepItem)
}
}
item.Consumed.SEPs = sepsConsumed
sepsReserved := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Reserved.SEPs))
for sepId, data := range resConsItem.Reserved.SEPs {
for dataName, diskData := range data {
sepItem := models.ResourceConsumptionSepListModel{
SepID: types.StringValue(sepId),
PoolName: types.StringValue(dataName),
DiskSize: types.Float64Value(diskData.DiskSize),
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
}
sepsReserved = append(sepsReserved, sepItem)
}
}
item.Reserved.SEPs = sepsReserved
items = append(items, item)
}
state.Items = items
state.EntryCount = types.Int64Value(int64(resConsList.EntryCount))
tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: after flatten")
tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionList")
return nil
}

@ -0,0 +1,124 @@
package flattens
import (
"context"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountRGListDataSource flattens data source for account rg list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are added to tflog.
func AccountRGListDataSource(ctx context.Context, state *models.DataSourceAccountRGListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountRGListDataSource")
diags := diag.Diagnostics{}
rgList, err := utilities.AccountRGListCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about account rg list", err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountRGListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceAccountRGListModel{
AccountID: state.AccountID,
RGID: state.RGID,
VinsID: state.VinsID,
VMID: state.VMID,
Name: state.Name,
Status: state.Status,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
SortBy: state.SortBy,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(rgList.EntryCount)),
}
items := make([]models.ItemAccountRGModel, 0, len(rgList.Data))
for _, item := range rgList.Data {
i := models.ItemAccountRGModel{
Computes: &models.RGComputesModel{
Started: types.Int64Value(int64(item.Computes.Started)),
Stopped: types.Int64Value(int64(item.Computes.Stopped)),
},
Resources: &models.RGResourcesModel{
Consumed: &models.ResourceModel{
CPU: types.Int64Value(item.Resources.Consumed.CPU),
DiskSize: types.Float64Value(item.Resources.Consumed.DiskSize),
DiskSizeMax: types.Float64Value(item.Resources.Consumed.DiskSizeMax),
ExtIPs: types.Int64Value(item.Resources.Consumed.ExtIPs),
ExtTraffic: types.Int64Value(item.Resources.Consumed.ExtTraffic),
GPU: types.Int64Value(item.Resources.Consumed.GPU),
RAM: types.Int64Value(item.Resources.Consumed.RAM),
SEPs: flattenSep(item.Resources.Consumed.SEPs),
},
Limits: &models.LimitsRGModel{
CPU: types.Int64Value(item.Resources.Limits.CPU),
DiskSize: types.Int64Value(item.Resources.Limits.DiskSize),
DiskSizeMax: types.Int64Value(item.Resources.Limits.DiskSizeMax),
ExtIPs: types.Int64Value(item.Resources.Limits.ExtIPs),
ExtTraffic: types.Int64Value(item.Resources.Limits.ExtTraffic),
GPU: types.Int64Value(item.Resources.Limits.GPU),
RAM: types.Int64Value(item.Resources.Limits.RAM),
SEPs: types.Int64Value(int64(item.Resources.Limits.SEPs)),
},
Reserved: &models.ResourceModel{
CPU: types.Int64Value(item.Resources.Reserved.CPU),
DiskSize: types.Float64Value(item.Resources.Reserved.DiskSize),
DiskSizeMax: types.Float64Value(item.Resources.Reserved.DiskSizeMax),
ExtIPs: types.Int64Value(item.Resources.Reserved.ExtIPs),
ExtTraffic: types.Int64Value(item.Resources.Reserved.ExtTraffic),
GPU: types.Int64Value(item.Resources.Reserved.GPU),
RAM: types.Int64Value(item.Resources.Reserved.RAM),
SEPs: flattenSep(item.Resources.Reserved.SEPs),
},
},
CreatedBy: types.StringValue(item.CreatedBy),
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
DeletedBy: types.StringValue(item.DeletedBy),
DeletedTime: types.Int64Value(int64(item.DeletedTime)),
RGID: types.Int64Value(int64(item.ID)),
Milestones: types.Int64Value(int64(item.Milestones)),
RGName: types.StringValue(item.Name),
Status: types.StringValue(item.Status),
UpdatedBy: types.StringValue(item.UpdatedBy),
UpdatedTime: types.Int64Value(int64(item.UpdatedTime)),
VINSes: types.Int64Value(int64(item.VINSes)),
}
items = append(items, i)
}
state.Items = items
tflog.Info(ctx, "End flattens.AccountRGListDataSource")
return nil
}
func flattenSep(seps map[string]map[string]account.DiskUsage) []models.SepModel {
res := make([]models.SepModel, 0, len(seps))
for sepId := range seps {
for poolName, diskData := range seps[sepId] {
s := models.SepModel{
SepID: types.StringValue(sepId),
PoolName: types.StringValue(poolName),
DiskSize: types.Float64Value(diskData.DiskSize),
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
}
res = append(res, s)
}
}
return res
}

@ -0,0 +1,171 @@
package flattens
import (
"context"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// AccountResource flattens resource for account.
// Returns an error if the resource is not found on the platform.
// Flatten errors are added to tflog.
func AccountResource(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AccountResource")
diags := diag.Diagnostics{}
accountId := uint64(state.AccountID.ValueInt64())
if accountId == 0 {
id, err := strconv.Atoi(state.Id.ValueString())
if err != nil {
diags.AddError(
"flattens.AccountResource: cannot parse resource ID from state",
err.Error())
return diags
}
accountId = uint64(id)
}
recordAccount, err := utilities.AccountResourceCheckPresence(ctx, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("flattens.AccountResource: Cannot get info about resource with ID %v", accountId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.AccountResource: before flatten", map[string]any{"account_id": accountId, "recordAccount": recordAccount})
*state = models.ResourceAccountModel{
AccountName: types.StringValue(recordAccount.Name),
Username: state.Username,
EmailAddress: state.EmailAddress,
SendAccessEmails: state.SendAccessEmails,
Users: state.Users,
Restore: state.Restore,
Permanently: state.Permanently,
Enable: state.Enable,
ResourceLimits: flattenResourceLimitsInAccountResource(ctx, recordAccount.ResourceLimits, state),
AvailableTemplates: state.AvailableTemplates,
Timeouts: state.Timeouts,
Id: types.StringValue(strconv.Itoa(int(accountId))),
AccountID: types.Int64Value(int64(recordAccount.ID)),
DCLocation: types.StringValue(recordAccount.DCLocation),
CKey: types.StringValue(recordAccount.CKey),
ACL: resourceFlattenACLInAccount(ctx, recordAccount.ACL),
Company: types.StringValue(recordAccount.Company),
CompanyURL: types.StringValue(recordAccount.CompanyURL),
CPUAllocationParameter: types.StringValue(recordAccount.CPUAllocationParameter),
CPUAllocationRatio: types.Float64Value(recordAccount.CPUAllocationRatio),
CreatedBy: types.StringValue(recordAccount.CreatedBy),
CreatedTime: types.Int64Value(int64(recordAccount.CreatedTime)),
DeactivationTime: types.Float64Value(recordAccount.DeactivationTime),
DeletedBy: types.StringValue(recordAccount.DeletedBy),
DeletedTime: types.Int64Value(int64(recordAccount.DeletedTime)),
DisplayName: types.StringValue(recordAccount.DisplayName),
GUID: types.Int64Value(int64(recordAccount.GUID)),
Status: types.StringValue(recordAccount.Status),
UpdatedTime: types.Int64Value(int64(recordAccount.UpdatedTime)),
Version: types.Int64Value(int64(recordAccount.Version)),
}
state.VINS, diags = types.ListValueFrom(ctx, types.Int64Type, recordAccount.VINS)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.VINS to state.VINS", diags))
}
state.UniqPools, diags = types.ListValueFrom(ctx, types.StringType, recordAccount.UniqPools)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.UniqPools to state.UniqPools", diags))
}
state.ResourceTypes, diags = types.ListValueFrom(ctx, types.Int64Type, recordAccount.ResTypes)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.ResTypes to state.ResourceTypes", diags))
}
state.ComputeFeatures, diags = types.SetValueFrom(ctx, types.StringType, recordAccount.ComputeFeatures)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.ComputeFeatures to state.ComputeFeatures", diags))
}
tflog.Info(ctx, "flattens.AccountResource: after flatten", map[string]any{"account_id": state.Id.ValueString()})
tflog.Info(ctx, "End flattens.AccountResource", map[string]any{"account_id": state.Id.ValueString()})
return nil
}
func flattenResourceLimitsInAccountResource(ctx context.Context, limits account.ResourceLimits, state *models.ResourceAccountModel) types.Object {
tflog.Info(ctx, "Start flattenResourceLimitsInAccountResource")
diags := diag.Diagnostics{}
var resourceLimits models.ResourceLimitsInAccountResourceModel
diags.Append(state.ResourceLimits.As(ctx, &resourceLimits, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "flattenResourceLimitsInAccountResource: cannot populate resourceLimits with plan.ResourceLimits object element")
}
if resourceLimits.CUC.ValueFloat64() == 0 {
resourceLimits.CUC = types.Float64Value(limits.CuC)
}
if resourceLimits.CUD.ValueFloat64() == 0 {
resourceLimits.CUD = types.Float64Value(limits.CuD)
}
if resourceLimits.CUI.ValueFloat64() == 0 {
resourceLimits.CUI = types.Float64Value(limits.CuI)
}
if resourceLimits.CUM.ValueFloat64() == 0 {
resourceLimits.CUM = types.Float64Value(limits.CuM)
}
if resourceLimits.CUNP.ValueFloat64() == 0 {
resourceLimits.CUNP = types.Float64Value(limits.CuNP)
}
if resourceLimits.GPUUnits.ValueFloat64() == 0 {
resourceLimits.GPUUnits = types.Float64Value(limits.GPUUnits)
}
res, objDiags := types.ObjectValueFrom(ctx, models.ItemResourceLimitsInAccountResource, resourceLimits)
if objDiags.HasError() {
tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimitsInAccountResource struct to obj", objDiags))
}
tflog.Info(ctx, "End flattenResourceLimitsInAccountResource")
return res
}
func resourceFlattenACLInAccount(ctx context.Context, aclList []account.ACL) types.List {
tflog.Info(ctx, "Start resourceFlattenACLInAccount")
tempSlice := make([]types.Object, 0, len(aclList))
for _, item := range aclList {
temp := models.ACLInAccountModel{
Explicit: types.BoolValue(item.Explicit),
GUID: types.StringValue(item.GUID),
Right: types.StringValue(item.Right),
Status: types.StringValue(item.Status),
Type: types.StringValue(item.Type),
UserGroupID: types.StringValue(item.UserGroupID),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemACLInAccount, temp)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("Error resourceFlattenACLInAccount struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACLInAccount}, tempSlice)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("Error resourceFlattenACLInAccount", diags))
}
tflog.Info(ctx, "End resourceFlattenACLInAccount")
return res
}
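
The resource_limits handling above relies on a round trip between a types.Object and a tfsdk-tagged struct: state.ResourceLimits.As unpacks the object, zero-valued fields are backfilled from the platform record, and types.ObjectValueFrom packs the struct again. The self-contained sketch below shows that round trip with a cut-down two-field model and made-up numbers.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
)

// limitsModel is a cut-down stand-in for ResourceLimitsInAccountResourceModel.
type limitsModel struct {
	CUC types.Float64 `tfsdk:"cu_c"`
	CUM types.Float64 `tfsdk:"cu_m"`
}

// limitsAttrTypes pairs each tfsdk tag with its attr.Type, like ItemResourceLimitsInAccountResource.
var limitsAttrTypes = map[string]attr.Type{
	"cu_c": types.Float64Type,
	"cu_m": types.Float64Type,
}

func main() {
	ctx := context.Background()

	// Object -> struct: what state.ResourceLimits.As does in the flatten above.
	obj := types.ObjectValueMust(limitsAttrTypes, map[string]attr.Value{
		"cu_c": types.Float64Value(0), // left unset in the plan
		"cu_m": types.Float64Value(8),
	})
	var limits limitsModel
	_ = obj.As(ctx, &limits, basetypes.ObjectAsOptions{})

	// Backfill the zero value from the platform record (14 is made up).
	if limits.CUC.ValueFloat64() == 0 {
		limits.CUC = types.Float64Value(14)
	}

	// Struct -> object: what types.ObjectValueFrom does before the state is written.
	res, diags := types.ObjectValueFrom(ctx, limitsAttrTypes, limits)
	fmt.Println(res, "has errors:", diags.HasError())
}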

@ -0,0 +1,24 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountAuditsListModel struct {
// request fields
AccountID types.Int64 `tfsdk:"account_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemAuditModel `tfsdk:"items"`
}
type ItemAuditModel struct {
Call types.String `tfsdk:"call"`
ResponseTime types.Float64 `tfsdk:"responsetime"`
StatusCode types.Int64 `tfsdk:"statuscode"`
Timestamp types.Float64 `tfsdk:"timestamp"`
User types.String `tfsdk:"user"`
}

@ -0,0 +1,15 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountAvailableTemplatesListModel struct {
// request fields
AccountID types.Int64 `tfsdk:"account_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Items types.List `tfsdk:"items"`
}

@ -0,0 +1,50 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ListComputesModel struct {
// request fields
AccountID types.Int64 `tfsdk:"account_id"`
ComputeID types.Int64 `tfsdk:"compute_id"`
Name types.String `tfsdk:"name"`
RGName types.String `tfsdk:"rg_name"`
RGID types.Int64 `tfsdk:"rg_id"`
TechStatus types.String `tfsdk:"tech_status"`
IpAddress types.String `tfsdk:"ip_address"`
ExtNetName types.String `tfsdk:"extnet_name"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Items []ItemComputeModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemComputeModel struct {
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
CPUs types.Int64 `tfsdk:"cpus"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
ID types.Int64 `tfsdk:"compute_id"`
Name types.String `tfsdk:"compute_name"`
RAM types.Int64 `tfsdk:"ram"`
Registered types.Bool `tfsdk:"registered"`
RGID types.Int64 `tfsdk:"rg_id"`
RgName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
TotalDisksSize types.Int64 `tfsdk:"total_disks_size"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
UserManaged types.Bool `tfsdk:"user_managed"`
VINSConnected types.Int64 `tfsdk:"vins_connected"`
}

@ -0,0 +1,36 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountDisksListModel struct {
// required fields
AccountID types.Int64 `tfsdk:"account_id"`
// optional fields
DiskID types.Int64 `tfsdk:"disk_id"`
Name types.String `tfsdk:"name"`
DiskMaxSize types.Int64 `tfsdk:"disk_max_size"`
Type types.String `tfsdk:"type"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
SortBy types.String `tfsdk:"sort_by"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemDiskModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemDiskModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
DiskName types.String `tfsdk:"disk_name"`
Pool types.String `tfsdk:"pool"`
SEPID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
Type types.String `tfsdk:"type"`
}

@ -0,0 +1,50 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountFlipgroupsListModel struct {
// optional and required fields
AccountID types.Int64 `tfsdk:"account_id"`
Name types.String `tfsdk:"name"`
VINSID types.Int64 `tfsdk:"vins_id"`
VINSName types.String `tfsdk:"vins_name"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
ByIP types.String `tfsdk:"by_ip"`
FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemAccountFlipgroupModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemAccountFlipgroupModel struct {
AccountID types.Int64 `tfsdk:"account_id"`
ClientType types.String `tfsdk:"client_type"`
ConnType types.String `tfsdk:"conn_type"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DefaultGW types.String `tfsdk:"default_gw"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.Int64 `tfsdk:"fg_id"`
IP types.String `tfsdk:"ip"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"fg_name"`
NetID types.Int64 `tfsdk:"net_id"`
NetType types.String `tfsdk:"net_type"`
NetMask types.Int64 `tfsdk:"netmask"`
Status types.String `tfsdk:"status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
}

@ -0,0 +1,45 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type AccountGetResourceConsumptionModel struct {
// request fields
AccountID types.Int64 `tfsdk:"account_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Consumed *ResourceConsumptionModel `tfsdk:"consumed"`
Reserved *ResourceConsumptionModel `tfsdk:"reserved"`
Limits *ResourceConsumptionLimitsModel `tfsdk:"resource_limits"`
}
type ResourceConsumptionModel struct {
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Float64 `tfsdk:"disksize"`
DiskSizeMax types.Float64 `tfsdk:"disksizemax"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs []ResourceConsumptionSepModel `tfsdk:"seps"`
}
type ResourceConsumptionSepModel struct {
SepID types.String `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"data_name"`
DiskSize types.Float64 `tfsdk:"disk_size"`
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
}
type ResourceConsumptionLimitsModel struct {
CUC types.Float64 `tfsdk:"cu_c"`
CUD types.Float64 `tfsdk:"cu_d"`
CUI types.Float64 `tfsdk:"cu_i"`
CUM types.Float64 `tfsdk:"cu_m"`
CUDM types.Float64 `tfsdk:"cu_dm"`
CUNP types.Float64 `tfsdk:"cu_np"`
GPUUnits types.Float64 `tfsdk:"gpu_units"`
}

@ -0,0 +1,68 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountListDeletedModel struct {
// optional fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
ACL types.String `tfsdk:"acl"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
SortBy types.String `tfsdk:"sort_by"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemAccountListDeletedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemAccountListDeletedModel struct {
DCLocation types.String `tfsdk:"dc_location"`
Ckey types.String `tfsdk:"ckey"`
Meta types.List `tfsdk:"meta"`
ACL []RecordACLModel `tfsdk:"acl"`
ComputeFeatures types.List `tfsdk:"compute_features"`
Company types.String `tfsdk:"company"`
Companyurl types.String `tfsdk:"companyurl"`
CpuAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CpuAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
CreatedTime types.Int64 `tfsdk:"created_time"`
CreatedBy types.String `tfsdk:"created_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
AccountID types.Int64 `tfsdk:"account_id"`
GUID types.Int64 `tfsdk:"guid"`
AccountName types.String `tfsdk:"account_name"`
Status types.String `tfsdk:"status"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
ResourceLimits *AccountLimitsModel `tfsdk:"resource_limits"`
ResourceTypes types.List `tfsdk:"resource_types"`
SendAccessEmails types.Bool `tfsdk:"send_access_emails"`
UniqPools types.List `tfsdk:"uniq_pools"`
Version types.Int64 `tfsdk:"version"`
Vins types.List `tfsdk:"vins"`
}
type RecordACLModel struct {
Explicit types.Bool `tfsdk:"explicit"`
GUID types.String `tfsdk:"guid"`
Right types.String `tfsdk:"right"`
Status types.String `tfsdk:"status"`
Type types.String `tfsdk:"type"`
UserGroupID types.String `tfsdk:"user_group_id"`
}
type AccountLimitsModel struct {
CUC types.Float64 `tfsdk:"cu_c"`
CUD types.Float64 `tfsdk:"cu_d"`
CUI types.Float64 `tfsdk:"cu_i"`
CUM types.Float64 `tfsdk:"cu_m"`
CUDM types.Float64 `tfsdk:"cu_dm"`
CUNP types.Float64 `tfsdk:"cu_np"`
GPUUnits types.Float64 `tfsdk:"gpu_units"`
}

@ -0,0 +1,40 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type AccountGetResourceConsumptionListModel struct {
// request fields
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Items []AccountGetResourceConsumptionListItemModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type AccountGetResourceConsumptionListItemModel struct {
// response fields
AccountId types.Int64 `tfsdk:"account_id"`
Consumed *ResourceConsumptionListModel `tfsdk:"consumed"`
Reserved *ResourceConsumptionListModel `tfsdk:"reserved"`
}
type ResourceConsumptionListModel struct {
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Float64 `tfsdk:"disksize"`
DiskSizeMax types.Float64 `tfsdk:"disksizemax"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs []ResourceConsumptionSepListModel `tfsdk:"seps"`
}
type ResourceConsumptionSepListModel struct {
SepID types.String `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"data_name"`
DiskSize types.Float64 `tfsdk:"disk_size"`
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
}

@ -0,0 +1,81 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAccountRGListModel struct {
// optional and required fields
AccountID types.Int64 `tfsdk:"account_id"`
RGID types.Int64 `tfsdk:"rg_id"`
VinsID types.Int64 `tfsdk:"vins_id"`
VMID types.Int64 `tfsdk:"vm_id"`
Name types.String `tfsdk:"name"`
Status types.String `tfsdk:"status"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
SortBy types.String `tfsdk:"sort_by"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemAccountRGModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemAccountRGModel struct {
Computes *RGComputesModel `tfsdk:"computes"`
Resources *RGResourcesModel `tfsdk:"resources"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
RGID types.Int64 `tfsdk:"rg_id"`
Milestones types.Int64 `tfsdk:"milestones"`
RGName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
VINSes types.Int64 `tfsdk:"vinses"`
}
type RGComputesModel struct {
Started types.Int64 `tfsdk:"started"`
Stopped types.Int64 `tfsdk:"stopped"`
}
type RGResourcesModel struct {
Consumed *ResourceModel `tfsdk:"consumed"`
Limits *LimitsRGModel `tfsdk:"limits"`
Reserved *ResourceModel `tfsdk:"reserved"`
}
type LimitsRGModel struct {
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Int64 `tfsdk:"disksize"`
DiskSizeMax types.Int64 `tfsdk:"disksizemax"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs types.Int64 `tfsdk:"seps"`
}
type ResourceModel struct {
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Float64 `tfsdk:"disksize"`
DiskSizeMax types.Float64 `tfsdk:"disksizemax"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs []SepModel `tfsdk:"seps"`
}
type SepModel struct {
SepID types.String `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"data_name"`
DiskSize types.Float64 `tfsdk:"disk_size"`
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
}

@ -0,0 +1,91 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceAccountModel struct {
// request fields - required
AccountName types.String `tfsdk:"account_name"`
Username types.String `tfsdk:"username"`
// request fields - optional
EmailAddress types.String `tfsdk:"emailaddress"`
SendAccessEmails types.Bool `tfsdk:"send_access_emails"`
Users types.List `tfsdk:"users"`
UniqPools types.List `tfsdk:"uniq_pools"`
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
AvailableTemplates types.Set `tfsdk:"available_templates"`
Restore types.Bool `tfsdk:"restore"`
Permanently types.Bool `tfsdk:"permanently"`
Enable types.Bool `tfsdk:"enable"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
ComputeFeatures types.Set `tfsdk:"compute_features"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
DCLocation types.String `tfsdk:"dc_location"`
AccountID types.Int64 `tfsdk:"account_id"`
CKey types.String `tfsdk:"ckey"`
ACL types.List `tfsdk:"acl"`
Company types.String `tfsdk:"company"`
CompanyURL types.String `tfsdk:"companyurl"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeactivationTime types.Float64 `tfsdk:"deactivation_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DisplayName types.String `tfsdk:"displayname"`
GUID types.Int64 `tfsdk:"guid"`
ResourceTypes types.List `tfsdk:"resource_types"`
Status types.String `tfsdk:"status"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
Version types.Int64 `tfsdk:"version"`
VINS types.List `tfsdk:"vins"`
}
type UsersModel struct {
UserID types.String `tfsdk:"user_id"`
AccessType types.String `tfsdk:"access_type"`
RecursiveDelete types.Bool `tfsdk:"recursive_delete"`
}
type ResourceLimitsInAccountResourceModel struct {
CUC types.Float64 `tfsdk:"cu_c"`
CUD types.Float64 `tfsdk:"cu_d"`
CUI types.Float64 `tfsdk:"cu_i"`
CUM types.Float64 `tfsdk:"cu_m"`
CUNP types.Float64 `tfsdk:"cu_np"`
GPUUnits types.Float64 `tfsdk:"gpu_units"`
}
var ItemResourceLimitsInAccountResource = map[string]attr.Type{
"cu_c": types.Float64Type,
"cu_d": types.Float64Type,
"cu_i": types.Float64Type,
"cu_m": types.Float64Type,
"cu_np": types.Float64Type,
"gpu_units": types.Float64Type,
}
type ACLInAccountModel struct {
Explicit types.Bool `tfsdk:"explicit"`
GUID types.String `tfsdk:"guid"`
Right types.String `tfsdk:"right"`
Status types.String `tfsdk:"status"`
Type types.String `tfsdk:"type"`
UserGroupID types.String `tfsdk:"user_group_id"`
}
var ItemACLInAccount = map[string]attr.Type{
"explicit": types.BoolType,
"guid": types.StringType,
"right": types.StringType,
"status": types.StringType,
"type": types.StringType,
"user_group_id": types.StringType,
}
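
Users is stored in the plan as a types.List, but the create/update utilities need it element by element as []UsersModel. A hedged sketch of that unpacking is shown below; the helper name usersFromPlan is hypothetical, while ElementsAs is the framework call that actually performs the conversion.

package utilities

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"

	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)

// usersFromPlan is a hypothetical helper: it unpacks the users list from the plan
// into []models.UsersModel so each element's user_id/access_type/recursive_delete
// can be passed to the platform. ElementsAs does the actual conversion.
func usersFromPlan(ctx context.Context, plan *models.ResourceAccountModel) ([]models.UsersModel, diag.Diagnostics) {
	var users []models.UsersModel
	diags := plan.Users.ElementsAs(ctx, &users, false)
	return users, diags
}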

@ -0,0 +1,321 @@
package cbAccount
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceAccount{}
_ resource.ResourceWithImportState = &resourceAccount{}
)
// NewResourceAccount is a helper function to simplify the provider implementation.
func NewResourceAccount() resource.Resource {
return &resourceAccount{}
}
// resourceAccount is the resource implementation.
type resourceAccount struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceAccount) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
tflog.Info(ctx, "Create resourceAccount: start creating")
var plan models.ResourceAccountModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceAccount: Error receiving the plan")
return
}
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceAccount: Error set timeout")
return
}
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
tflog.Info(ctx, "Create resourceAccount: set timeouts successfully", map[string]any{
"createTimeout": createTimeout})
diags, id := utilities.UtilityAccountCreate(ctx, &plan, r.client)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceAccount: error with UtilityAccountCreate")
return
}
plan.Id = types.StringValue(strconv.Itoa(int(*id)))
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.AccountResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "resourceAccount: Account created", map[string]any{"account_id": id})
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceAccount) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceAccountModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceAccount: Error get state")
return
}
tflog.Info(ctx, "Read resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceAccount: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceAccount: set timeouts successfully", map[string]any{
"account_id": state.Id.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// read status
resp.Diagnostics.Append(utilities.AccountReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceAccount: Error reading status")
return
}
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.AccountResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceAccount: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceAccount: Error set state")
return
}
tflog.Info(ctx, "End read resourceAccount")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceAccount) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceAccountModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error receiving the plan")
return
}
logMap := map[string]any{"account_id": plan.Id.ValueString()}
tflog.Info(ctx, "Update resourceAccount: got plan successfully", logMap)
// Retrieve values from state
var state models.ResourceAccountModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceAccount: got state successfully", logMap)
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error set timeout")
return
}
tflog.Info(ctx, "Update resourceAccount: set timeouts successfully", map[string]any{
"account_id": state.Id.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
accountId, err := strconv.Atoi(state.Id.ValueString())
if err != nil {
resp.Diagnostics.AddError("Update resourceAccount: Cannot parse ID from state", err.Error())
return
}
// enable/disable account
if !plan.Enable.Equal(state.Enable) && !plan.Enable.IsNull() {
resp.Diagnostics.Append(utilities.EnableDisableAccount(ctx, uint64(accountId), plan.Enable.ValueBool(), r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error enabling/disabling account")
return
}
}
// general update account
resp.Diagnostics.Append(utilities.UpdateAccount(ctx, uint64(accountId), &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error with general account update")
return
}
// cpu_allocation_parameter
if !plan.CPUAllocationParameter.Equal(state.CPUAllocationParameter) && !plan.CPUAllocationParameter.IsNull() && plan.CPUAllocationParameter.ValueString() != "" {
resp.Diagnostics.Append(utilities.UtilityAccountCPUParameterUpdate(ctx, uint64(state.AccountID.ValueInt64()), &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error update CPUAllocationParameter ")
return
}
}
// cpu_allocation_ratio
if !plan.CPUAllocationRatio.Equal(state.CPUAllocationRatio) && !plan.CPUAllocationRatio.IsNull() {
resp.Diagnostics.Append(utilities.UtilityAccountCPURatioUpdate(ctx, uint64(accountId), &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error update CPUAllocationRatio ")
return
}
}
// add/delete users
if !plan.Users.Equal(state.Users) {
resp.Diagnostics.Append(utilities.AddDeleteUsersAccount(ctx, uint64(accountId), &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error adding/deleting users to/from account")
return
}
}
// available_templates
if !plan.AvailableTemplates.Equal(state.AvailableTemplates) {
resp.Diagnostics.Append(utilities.UtilityAccountAvailiableTemplatesUpdate(ctx, &state, &plan, false, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error update AvailiableTemplates")
return
}
}
// compute_features
if !plan.ComputeFeatures.Equal(state.ComputeFeatures) {
resp.Diagnostics.Append(utilities.UtilityAccountComputeFeaturesUpdate(ctx, uint64(accountId), &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceAccount: Error update ComputeFeatures")
return
}
}
tflog.Info(ctx, "Update resourceAccount: account update is completed", logMap)
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.AccountResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceAccount) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceAccountModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceAccount: Error get state")
return
}
tflog.Info(ctx, "Delete resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceAccount: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceAccount: set timeouts successfully", map[string]any{
"account_id": state.Id.ValueString(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
permanently := state.Permanently.ValueBool()
if state.Permanently.IsNull() {
permanently = true
} // default true
// Delete existing resource group
delReq := account.DeleteRequest{
AccountID: uint64(state.AccountID.ValueInt64()),
Permanently: permanently,
}
tflog.Info(ctx, "Delete resourceAccount: before CloudBroker().Account().Delete", map[string]any{"req": delReq})
_, err := r.client.CloudBroker().Account().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceAccount: Error deleting account with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete resourceAccount", map[string]any{"account_id": state.Id.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceAccount) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceAccount(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceAccount) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_account"
}
// Configure adds the provider configured client to the resource.
func (r *resourceAccount) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceAccount")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceAccount successfully")
}
func (r *resourceAccount) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
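For orientation, the constructor above (NewResourceAccount) only becomes visible to Terraform once it is registered in the provider's Resources list. A minimal sketch of that wiring follows; the provider type name and the import path of the cloudbroker account package are assumptions made for illustration and are not part of this hunk.
package provider
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/resource"
cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
)
// dynamixProvider is a stand-in for the provider type defined elsewhere in the repository.
type dynamixProvider struct{}
// Resources returns the resource constructors exposed by the provider.
func (p *dynamixProvider) Resources(_ context.Context) []func() resource.Resource {
return []func() resource.Resource{
cbAccount.NewResourceAccount, // exposes the dynamix_cb_account resource type
}
}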

@ -0,0 +1,42 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountAuditsList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_id": schema.Int64Attribute{
Required: true,
Description: "account id",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"call": schema.StringAttribute{
Computed: true,
},
"responsetime": schema.Float64Attribute{
Computed: true,
},
"statuscode": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Float64Attribute{
Computed: true,
},
"user": schema.StringAttribute{
Computed: true,
},
},
},
},
}
}
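The schema builders in this package return plain attribute maps; the corresponding data source wraps them in its Schema method. A minimal sketch of that wrapping, with dataSourceAccountAuditsList standing in for the data source type, which is not shown in this hunk:
package account
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas"
)
// dataSourceAccountAuditsList is a stand-in for the real data source type.
type dataSourceAccountAuditsList struct {
client *decort.DecortClient
}
// Schema wires the attribute map from the schemas package into the data source.
func (d *dataSourceAccountAuditsList) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAccountAuditsList(),
}
}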

@ -0,0 +1,19 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceAccountAvailableTemplatesList() map[string]schema.Attribute {
return map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Required: true,
},
"items": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
}
}

@ -0,0 +1,117 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountComputesList() map[string]schema.Attribute {
return map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Required: true,
},
"compute_id": schema.Int64Attribute{
Optional: true,
},
"name": schema.StringAttribute{
Optional: true,
},
"rg_name": schema.StringAttribute{
Optional: true,
},
"rg_id": schema.Int64Attribute{
Optional: true,
},
"tech_status": schema.StringAttribute{
Optional: true,
},
"ip_address": schema.StringAttribute{
Optional: true,
},
"extnet_name": schema.StringAttribute{
Optional: true,
},
"extnet_id": schema.Int64Attribute{
Optional: true,
},
"sort_by": schema.StringAttribute{
Optional: true,
},
"page": schema.Int64Attribute{
Optional: true,
},
"size": schema.Int64Attribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"cpus": schema.Int64Attribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"compute_id": schema.Int64Attribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"registered": schema.BoolAttribute{
Computed: true,
},
"rg_id": schema.Int64Attribute{
Computed: true,
},
"rg_name": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"total_disks_size": schema.Int64Attribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"user_managed": schema.BoolAttribute{
Computed: true,
},
"vins_connected": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,81 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountDisksList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_id": schema.Int64Attribute{
Required: true,
Description: "account id",
},
// optional attributes
"disk_id": schema.Int64Attribute{
Optional: true,
Description: "find by disk id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"disk_max_size": schema.Int64Attribute{
Optional: true,
Description: "find by max size disk",
},
"type": schema.StringAttribute{
Optional: true,
Description: "find by type of the disks",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "page size",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,131 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountFlipgroupsList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_id": schema.Int64Attribute{
Required: true,
Description: "ID of the account",
},
// optional attributes
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"vins_id": schema.Int64Attribute{
Optional: true,
Description: "find by vins ID",
},
"vins_name": schema.StringAttribute{
Optional: true,
Description: "find by vins name",
},
"extnet_id": schema.Int64Attribute{
Optional: true,
Description: "find by extnet ID",
},
"by_ip": schema.StringAttribute{
Optional: true,
Description: "find by ip address",
},
"flipgroup_id": schema.Int64Attribute{
Optional: true,
Description: "find by flipgroup id",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "page size",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Computed: true,
},
"client_type": schema.StringAttribute{
Computed: true,
},
"conn_type": schema.StringAttribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"default_gw": schema.StringAttribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"fg_id": schema.Int64Attribute{
Computed: true,
},
"ip": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"fg_name": schema.StringAttribute{
Computed: true,
},
"net_id": schema.Int64Attribute{
Computed: true,
},
"net_type": schema.StringAttribute{
Computed: true,
},
"netmask": schema.Int64Attribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,131 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountGetResourceConsumption() map[string]schema.Attribute {
return map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Required: true,
},
"consumed": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Float64Attribute{
Computed: true,
},
"disksizemax": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"reserved": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
"ext_ips": schema.Int64Attribute{
Computed: true,
},
"ext_traffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"resource_limits": schema.SingleNestedAttribute{
Optional: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Computed: true,
},
"cu_d": schema.Float64Attribute{
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Computed: true,
},
"cu_i": schema.Float64Attribute{
Computed: true,
},
"cu_m": schema.Float64Attribute{
Computed: true,
},
"cu_np": schema.Float64Attribute{
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Computed: true,
},
},
},
}
}

@ -0,0 +1,181 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceAccountListDeleted() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"by_id": schema.Int64Attribute{
Optional: true,
Description: "filter by id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "filter by name",
},
"acl": schema.StringAttribute{
Optional: true,
Description: "filter by acl",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "page size",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"dc_location": schema.StringAttribute{
Computed: true,
},
"ckey": schema.StringAttribute{
Computed: true,
},
"meta": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"acl": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"explicit": schema.BoolAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"right": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"user_group_id": schema.StringAttribute{
Computed: true,
},
},
},
},
"compute_features": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"company": schema.StringAttribute{
Computed: true,
},
"companyurl": schema.StringAttribute{
Computed: true,
},
"cpu_allocation_parameter": schema.StringAttribute{
Computed: true,
},
"cpu_allocation_ratio": schema.Float64Attribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"resource_limits": schema.SingleNestedAttribute{
Optional: true,
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_d": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_i": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_m": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_np": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Optional: true,
Computed: true,
},
},
},
"resource_types": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"send_access_emails": schema.BoolAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"uniq_pools": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"version": schema.Int64Attribute{
Computed: true,
},
"vins": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,113 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountGetResourceListConsumption() map[string]schema.Attribute {
return map[string]schema.Attribute{
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Required: true,
},
"consumed": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Float64Attribute{
Computed: true,
},
"disksizemax": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"reserved": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Float64Attribute{
Computed: true,
},
"disksizemax": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,232 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAccountRGList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_id": schema.Int64Attribute{
Required: true,
Description: "ID of the account",
},
// optional attributes
"rg_id": schema.Int64Attribute{
Optional: true,
Description: "find by rg id",
},
"vins_id": schema.Int64Attribute{
Optional: true,
Description: "find by vins id",
},
"vm_id": schema.Int64Attribute{
Optional: true,
Description: "find by vm id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"status": schema.StringAttribute{
Optional: true,
Description: "find by status",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "page size",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"computes": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"started": schema.Int64Attribute{
Computed: true,
},
"stopped": schema.Int64Attribute{
Computed: true,
},
},
},
"resources": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"consumed": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Int64Attribute{
Computed: true,
},
"disksizemax": schema.Int64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"limits": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Int64Attribute{
Computed: true,
},
"disksizemax": schema.Int64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.Int64Attribute{
Computed: true,
},
},
},
"reserved": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disksize": schema.Float64Attribute{
Computed: true,
},
"disksizemax": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
},
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"rg_id": schema.Int64Attribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"rg_name": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"vinses": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,210 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaResourceAccount() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_name": schema.StringAttribute{
Required: true,
Description: "name of the account",
},
"username": schema.StringAttribute{
Required: true,
Description: "username of owner the account",
},
// optional attributes
"emailaddress": schema.StringAttribute{
Optional: true,
Description: "email",
},
"send_access_emails": schema.BoolAttribute{
Optional: true,
Description: "if true send emails when a user is granted access to resources",
},
"uniq_pools": schema.ListAttribute{
Optional: true,
Computed: true,
ElementType: types.StringType,
},
"users": schema.ListNestedAttribute{
Optional: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"user_id": schema.StringAttribute{
Required: true,
},
"access_type": schema.StringAttribute{
Required: true,
},
"recursive_delete": schema.BoolAttribute{
Optional: true,
// default is false
},
},
},
},
"cpu_allocation_parameter": schema.StringAttribute{
Computed: true,
Optional: true,
Description: "set cpu allocation parameter",
},
"cpu_allocation_ratio": schema.Float64Attribute{
Computed: true,
Optional: true,
Description: "set cpu allocation ratio",
},
"available_templates": schema.SetAttribute{
Optional: true,
ElementType: types.Int64Type,
Description: "Share images with account",
},
"restore": schema.BoolAttribute{
Optional: true,
Description: "restore a deleted account",
},
"permanently": schema.BoolAttribute{
Optional: true,
Description: "whether to completely delete the account",
// default is false
},
"enable": schema.BoolAttribute{
Optional: true,
Description: "enable/disable account",
},
"resource_limits": schema.SingleNestedAttribute{
Optional: true,
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_d": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_i": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_m": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"cu_np": schema.Float64Attribute{
Optional: true,
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Optional: true,
Computed: true,
},
},
},
"compute_features": schema.SetAttribute{
Optional: true,
ElementType: types.StringType,
Computed: true,
Validators: []validator.Set{
setvalidator.ValueStringsAre(stringvalidator.OneOfCaseInsensitive("hugepages", "numa", "cpupin", "vfnic")),
},
},
"account_id": schema.Int64Attribute{
Optional: true,
Computed: true,
},
"dc_location": schema.StringAttribute{
Computed: true,
},
"ckey": schema.StringAttribute{
Computed: true,
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"acl": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"explicit": schema.BoolAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"right": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"user_group_id": schema.StringAttribute{
Computed: true,
},
},
},
},
"company": schema.StringAttribute{
Computed: true,
},
"companyurl": schema.StringAttribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deactivation_time": schema.Float64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"displayname": schema.StringAttribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"resource_types": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"version": schema.Int64Attribute{
Computed: true,
},
"vins": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
}
}
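For orientation, the users nested attribute above is decoded (via ElementsAs in the utilities shown later) into a small model struct. The sketch below shows what that model plausibly looks like, inferred from the schema and from how the utilities read it; the authoritative definition lives in the models package and is not part of this hunk.
package models
import "github.com/hashicorp/terraform-plugin-framework/types"
// UsersModel (sketch): field names and tfsdk tags are inferred from the schema above
// and from how the account utilities read them.
type UsersModel struct {
UserID types.String `tfsdk:"user_id"`
AccessType types.String `tfsdk:"access_type"`
RecursiveDelete types.Bool `tfsdk:"recursive_delete"`
}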

@ -0,0 +1,21 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
)
func AccountAuditsListDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.ListAudits, error) {
tflog.Info(ctx, fmt.Sprintf("AccountAuditsListDataSourceCheckPresence: Get info about account audits with account ID - %v", accountId))
auditsList, err := c.CloudBroker().Account().Audits(ctx, account.AuditsRequest{AccountID: accountId})
if err != nil {
return nil, fmt.Errorf("cannot get info about account audits with error: %w", err)
}
return &auditsList, err
}
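This CheckPresence helper is the piece a data source's Read method calls before flattening the SDK response into state. A rough sketch of that call path follows, reusing the dataSourceAccountAuditsList stand-in from the Schema sketch earlier; the flatten step is summarized in a comment because its concrete helper is not part of this hunk.
package account
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/types"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities"
)
// Read (sketch): fetch the configured account_id, call the utility, then flatten.
func (d *dataSourceAccountAuditsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
var accountID types.Int64
resp.Diagnostics.Append(req.Config.GetAttribute(ctx, path.Root("account_id"), &accountID)...)
if resp.Diagnostics.HasError() {
return
}
audits, err := utilities.AccountAuditsListDataSourceCheckPresence(ctx, uint64(accountID.ValueInt64()), d.client)
if err != nil {
resp.Diagnostics.AddError("Cannot read account audits", err.Error())
return
}
// Here the *audits list would be flattened onto the items attribute and the
// state written back via resp.State.Set(ctx, &state).
_ = audits
}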

@ -0,0 +1,21 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
)
func AccountAvailableTemplatesListDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) ([]uint64, error) {
tflog.Info(ctx, fmt.Sprintf("AccountAvailableTemplatesListDataSourceCheckPresence: Get info about templates with account ID - %v", accountId))
templatesList, err := c.CloudBroker().Account().ListAvailableTemplates(ctx, account.ListAvailableTemplatesRequest{AccountID: accountId})
if err != nil {
return nil, fmt.Errorf("cannot get info about templated with error: %w", err)
}
return templatesList, err
}

@ -0,0 +1,61 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)
func AccountComputesListDataSourceCheckPresence(ctx context.Context, plan *models.ListComputesModel, c *decort.DecortClient) (*account.ListComputes, error) {
tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: Get info about list accounts")
req := account.ListComputesRequest{
AccountID: uint64(plan.AccountID.ValueInt64()),
}
if !plan.ComputeID.IsNull() {
req.ComputeID = uint64(plan.ComputeID.ValueInt64())
}
if !plan.Name.IsNull() {
req.Name = plan.Name.ValueString()
}
if !plan.RGName.IsNull() {
req.RGName = plan.RGName.ValueString()
}
if !plan.RGID.IsNull() {
req.RGID = uint64(plan.RGID.ValueInt64())
}
if !plan.TechStatus.IsNull() {
req.TechStatus = plan.TechStatus.ValueString()
}
if !plan.IpAddress.IsNull() {
req.IPAddress = plan.IpAddress.ValueString()
}
if !plan.ExtNetName.IsNull() {
req.ExtNetName = plan.ExtNetName.ValueString()
}
if !plan.ExtNetID.IsNull() {
req.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
}
if !plan.Page.IsNull() {
req.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
req.Size = uint64(plan.Size.ValueInt64())
}
if !plan.SortBy.IsNull() {
req.SortBy = plan.SortBy.ValueString()
}
tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: before call CloudBroker().Account().List", map[string]any{"req": req})
listComputes, err := c.CloudBroker().Account().ListComputes(ctx, req)
if err != nil {
return nil, fmt.Errorf("cannot get list computes with error: %w", err)
}
tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: response from CloudBroker().Account().List")
return listComputes, err
}

@ -0,0 +1,51 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)
func AccountDisksListCheckPresence(ctx context.Context, plan *models.DataSourceAccountDisksListModel, c *decort.DecortClient) (*account.ListDisks, error) {
tflog.Info(ctx, "AccountDisksListCheckPresence: Get info about account disks list")
disksListReq := account.ListDisksRequest{
AccountID: uint64(plan.AccountID.ValueInt64()),
}
if !plan.DiskID.IsNull() {
disksListReq.DiskID = uint64(plan.DiskID.ValueInt64())
}
if !plan.Name.IsNull() {
disksListReq.Name = plan.Name.ValueString()
}
if !plan.DiskMaxSize.IsNull() {
disksListReq.DiskMaxSize = uint64(plan.DiskMaxSize.ValueInt64())
}
if !plan.Type.IsNull() {
disksListReq.Type = plan.Type.ValueString()
}
if !plan.Page.IsNull() {
disksListReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
disksListReq.Size = uint64(plan.Size.ValueInt64())
}
if !plan.SortBy.IsNull() {
disksListReq.SortBy = plan.SortBy.ValueString()
}
tflog.Info(ctx, "AccountDisksListCheckPresence: before call CloudBroker().Account().ListDisks", map[string]any{"req": disksListReq})
disksList, err := c.CloudBroker().Account().ListDisks(ctx, disksListReq)
if err != nil {
return nil, fmt.Errorf("cannot get info about account disks list with error: %w", err)
}
tflog.Info(ctx, "AccountDisksListCheckPresence: response from CloudBroker().Account().ListDisks")
return disksList, err
}

@ -0,0 +1,55 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)
func AccountFlipgroupsListCheckPresence(ctx context.Context, plan *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) (*account.ListFLIPGroups, error) {
tflog.Info(ctx, "AccountFlipgroupsListCheckPresence: Get info about account flipgroups list")
flipgroupsListReq := account.ListFLIPGroupsRequest{AccountID: uint64(plan.AccountID.ValueInt64())}
if !plan.Name.IsNull() {
flipgroupsListReq.Name = plan.Name.ValueString()
}
if !plan.VINSID.IsNull() {
flipgroupsListReq.VINSID = uint64(plan.VINSID.ValueInt64())
}
if !plan.VINSName.IsNull() {
flipgroupsListReq.VINSName = plan.VINSName.ValueString()
}
if !plan.ExtNetID.IsNull() {
flipgroupsListReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
}
if !plan.ByIP.IsNull() {
flipgroupsListReq.ByIP = plan.ByIP.ValueString()
}
if !plan.FLIPGroupID.IsNull() {
flipgroupsListReq.FLIPGroupID = uint64(plan.FLIPGroupID.ValueInt64())
}
if !plan.SortBy.IsNull() {
flipgroupsListReq.SortBy = plan.SortBy.ValueString()
}
if !plan.Page.IsNull() {
flipgroupsListReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
flipgroupsListReq.Size = uint64(plan.Size.ValueInt64())
}
tflog.Info(ctx, "AccountListCheckPresence: before call CloudBroker().Account().ListFLIPGroups", map[string]any{"req": flipgroupsListReq})
flipgroupsList, err := c.CloudBroker().Account().ListFLIPGroups(ctx, flipgroupsListReq)
if err != nil {
return nil, fmt.Errorf("cannot get info about account flipgroups list with error: %w", err)
}
tflog.Info(ctx, "AccountListCheckPresence: response from CloudBroker().Account().ListFLIPGroups")
return flipgroupsList, err
}

@ -0,0 +1,24 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
)
func AccountGetResourceConsumptionDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordResourceConsumption, error) {
tflog.Info(ctx, fmt.Sprintf("AccountGetResourceConsumptionDataSourceCheckPresence: Get info about account with ID - %v", accountId))
record, err := c.CloudBroker().Account().GetResourceConsumption(ctx, account.GetResourceConsumptionRequest{AccountID: accountId})
if err != nil {
return nil, fmt.Errorf("cannot get info about resource with error: %w", err)
}
tflog.Info(ctx, "AccountGetResourceConsumptionDataSourceCheckPresence: response from CloudBroker().Account().GetResourceConsumption",
map[string]any{"account_id": accountId, "response": record})
return record, err
}

@ -0,0 +1,46 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)
func AccountListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) (*account.ListAccounts, error) {
tflog.Info(ctx, "AccountListDeletedCheckPresence: Get info about account list deleted")
accListDelReq := account.ListDeletedRequest{}
if !plan.ByID.IsNull() {
accListDelReq.ByID = uint64(plan.ByID.ValueInt64())
}
if !plan.Name.IsNull() {
accListDelReq.Name = plan.Name.ValueString()
}
if !plan.ACL.IsNull() {
accListDelReq.ACL = plan.ACL.ValueString()
}
if !plan.Page.IsNull() {
accListDelReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
accListDelReq.Size = uint64(plan.Size.ValueInt64())
}
if !plan.SortBy.IsNull() {
accListDelReq.SortBy = plan.SortBy.ValueString()
}
tflog.Info(ctx, "AccountListDeletedCheckPresence: before call CloudBroker().Account().ListDeleted", map[string]any{"req": accListDelReq})
accListDel, err := c.CloudBroker().Account().ListDeleted(ctx, accListDelReq)
if err != nil {
return nil, fmt.Errorf("cannot get info about account with error: %w", err)
}
tflog.Info(ctx, "AccountListDeletedCheckPresence: response from CloudBroker().Account().ListDeleted")
return accListDel, err
}

@ -0,0 +1,24 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
)
func AccountGetResourceConsumptionListDataSourceCheckPresence(ctx context.Context, c *decort.DecortClient) (*account.ListResources, error) {
tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: Get info about account resource consumption list")
record, err := c.CloudBroker().Account().ListResourceConsumption(ctx)
if err != nil {
return nil, fmt.Errorf("cannot get info about resource with error: %w", err)
}
tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: response from CloudBroker().Account().ListResourceConsumption",
map[string]any{"response": record})
return record, err
}

@ -0,0 +1,52 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
)
func AccountRGListCheckPresence(ctx context.Context, plan *models.DataSourceAccountRGListModel, c *decort.DecortClient) (*account.ListRG, error) {
tflog.Info(ctx, "AccountRGListCheckPresence: Get info about account rg list")
rgListReq := account.ListRGRequest{AccountID: uint64(plan.AccountID.ValueInt64())}
if !plan.RGID.IsNull() {
rgListReq.RGID = uint64(plan.RGID.ValueInt64())
}
if !plan.VinsID.IsNull() {
rgListReq.VINSID = uint64(plan.VinsID.ValueInt64())
}
if !plan.VMID.IsNull() {
rgListReq.VMID = uint64(plan.VMID.ValueInt64())
}
if !plan.Name.IsNull() {
rgListReq.Name = plan.Name.ValueString()
}
if !plan.Status.IsNull() {
rgListReq.Status = plan.Status.ValueString()
}
if !plan.Page.IsNull() {
rgListReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
rgListReq.Size = uint64(plan.Size.ValueInt64())
}
if !plan.SortBy.IsNull() {
rgListReq.SortBy = plan.SortBy.ValueString()
}
tflog.Info(ctx, "AccountRGListCheckPresence: before call CloudBroker().Account().ListRG", map[string]any{"req": rgListReq})
rgList, err := c.CloudBroker().Account().ListRG(ctx, rgListReq)
if err != nil {
return nil, fmt.Errorf("cannot get info about account with error: %w", err)
}
tflog.Info(ctx, "AccountRGListCheckPresence: response from CloudBroker().Account().ListRG")
return rgList, err
}

@ -0,0 +1,725 @@
package utilities
import (
"context"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)
// AccountResourceCheckPresence checks if account with accountId exists
func AccountResourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordAccount, error) {
tflog.Info(ctx, fmt.Sprintf("AccountResourceCheckPresence: Get info about resource with ID - %v", accountId))
accountRecord, err := c.CloudBroker().Account().Get(ctx, account.GetRequest{AccountID: accountId})
if err != nil {
return nil, fmt.Errorf("AccountResourceCheckPresence: cannot get info about resource with error: %w", err)
}
tflog.Info(ctx, "AccountResourceCheckPresence: response from CloudBroker().Account().Get", map[string]any{"account_id": accountId, "response": accountRecord})
return accountRecord, err
}
// AccountReadStatus loads the account resource by its id and checks its current status. If the account
// is in status Deleted, it attempts to restore it (unless restore is explicitly set to false).
// In case of failure returns errors.
func AccountReadStatus(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "AccountReadStatus: Read status resource with ID", map[string]any{"account_id": state.Id.ValueString()})
diags := diag.Diagnostics{}
accountId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("AccountReadStatus: Cannot parse resource ID from state", err.Error())
return diags
}
recordAccount, err := AccountResourceCheckPresence(ctx, accountId, c)
if err != nil {
diags.AddError("AccountReadStatus: Unable to Read account before status check", err.Error())
return diags
}
// check resource status
switch recordAccount.Status {
case status.Disabled:
tflog.Info(ctx, "The account is in status Disabled, troubles may occur with update. Please, enable account first.")
case status.Deleted:
restore := state.Restore.ValueBool()
if state.Restore.IsNull() {
restore = true
} // default true
if restore {
// attempt to restore account
tflog.Info(ctx, "AccountReadStatus: account with status.Deleted is being read, attempt to restore it", map[string]any{
"account_id": accountId,
"status": recordAccount.Status})
diags.Append(RestoreAccount(ctx, accountId, c)...)
if diags.HasError() {
tflog.Error(ctx, "AccountReadStatus: cannot restore account")
return diags
}
tflog.Info(ctx, "AccountReadStatus: account restored successfully", map[string]any{"account_id": accountId})
} else {
tflog.Info(ctx, "AccountReadStatus: account is i status Deleted but restore is not specified")
}
case status.Destroyed:
diags.AddError(
"AccountReadStatus: Account is in status Destroyed",
fmt.Sprintf("the resource with account_id %d cannot be read or updated because it has been destroyed", accountId),
)
return diags
case status.Destroying:
diags.AddError(
"AccountReadStatus: Account is in progress with status Destroying",
fmt.Sprintf("the resource with account_id %d cannot be read or updated because it is currently being destroyed", accountId),
)
return diags
}
return nil
}
// RestoreAccount performs account Restore request.
// Returns error in case of failures.
func RestoreAccount(ctx context.Context, accountId uint64, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
restoreReq := account.RestoreRequest{
AccountID: accountId,
}
tflog.Info(ctx, "RestoreAccount: before calling CloudBroker().Account().Restore", map[string]any{"account_id": accountId, "req": restoreReq})
res, err := c.CloudBroker().Account().Restore(ctx, restoreReq)
if err != nil {
diags.AddError(
"RestoreAccount: cannot restore account",
err.Error(),
)
return diags
}
tflog.Info(ctx, "RestoreAccount: response from CloudBroker().Account().Restore", map[string]any{"account_id": accountId, "response": res})
return nil
}
// EnableDisableAccount performs account Enable/Disable request.
// Returns error in case of failures.
func EnableDisableAccount(ctx context.Context, accountId uint64, enable bool, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start EnableDisableAccount", map[string]any{"account_id": accountId})
diags := diag.Diagnostics{}
if enable {
tflog.Info(ctx, "EnableDisableAccount: before calling CloudBroker().Account().Enable", map[string]any{"account_id": accountId})
res, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{AccountID: accountId})
if err != nil {
diags.AddError(
"EnableDisableAccount: cannot enable account",
err.Error(),
)
return diags
}
tflog.Info(ctx, "EnableDisableAccount: response from CloudBroker().Account().Enable", map[string]any{"account_id": accountId, "response": res})
return nil
}
tflog.Info(ctx, "EnableDisableAccount: before calling CloudBroker().Account().Disable", map[string]any{"account_id": accountId})
res, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{AccountID: accountId})
if err != nil {
diags.AddError(
"EnableDisableAccount: cannot disable account",
err.Error(),
)
return diags
}
tflog.Info(ctx, "EnableDisableAccount: response from CloudBroker().Account().Disable", map[string]any{"account_id": accountId, "response": res})
return nil
}
func UtilityAccountCreate(ctx context.Context, plan *models.ResourceAccountModel, c *decort.DecortClient) (diag.Diagnostics, *uint64) {
tflog.Info(ctx, "Start UtilityAccountCreate", map[string]any{"account_id": plan.AccountID})
diags := diag.Diagnostics{}
req := account.CreateRequest{
Name: plan.AccountName.ValueString(),
Username: plan.Username.ValueString(),
}
if !plan.EmailAddress.IsUnknown() {
req.EmailAddress = plan.EmailAddress.ValueString()
}
if !plan.SendAccessEmails.IsUnknown() {
req.SendAccessEmails = plan.SendAccessEmails.ValueBool()
}
if !plan.UniqPools.IsUnknown() {
var uniqPools []string
diags.Append(plan.UniqPools.ElementsAs(ctx, &uniqPools, true)...)
if diags.HasError() {
tflog.Error(ctx, "UtilityAccountCreate: cannot populate UniqPools with plan.UniqPools object element")
return diags, nil
}
req.UniqPools = uniqPools
}
if !plan.ResourceLimits.IsUnknown() {
var resourceLimitsPlan models.ResourceLimitsInAccountResourceModel
diags.Append(plan.ResourceLimits.As(ctx, &resourceLimitsPlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "UtilityAccountCreate: cannot populate ResourceLimits with plan.ResourceLimits object element")
return diags, nil
}
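// For each resource limit below, a zero (unset) value in the plan is sent to the API as -1,
// which appears to be the platform convention for "no limit"; non-zero values are truncated to int64.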
if resourceLimitsPlan.CUM.ValueFloat64() == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = int64(resourceLimitsPlan.CUM.ValueFloat64())
}
if resourceLimitsPlan.CUD.ValueFloat64() == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = int64(resourceLimitsPlan.CUD.ValueFloat64())
}
if resourceLimitsPlan.CUC.ValueFloat64() == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = int64(resourceLimitsPlan.CUC.ValueFloat64())
}
if resourceLimitsPlan.CUI.ValueFloat64() == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = int64(resourceLimitsPlan.CUI.ValueFloat64())
}
if resourceLimitsPlan.CUNP.ValueFloat64() == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(resourceLimitsPlan.CUNP.ValueFloat64())
}
if resourceLimitsPlan.GPUUnits.ValueFloat64() == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = int64(resourceLimitsPlan.GPUUnits.ValueFloat64())
}
}
accountId, err := c.CloudBroker().Account().Create(ctx, req)
if err != nil {
diags.AddError("UtilityAccountCreate: Unable to create account",
err.Error())
return diags, nil
}
if !plan.Users.IsUnknown() {
usersPlan := make([]models.UsersModel, 0, len(plan.Users.Elements()))
diagsI := plan.Users.ElementsAs(ctx, &usersPlan, true)
if diagsI.HasError() {
tflog.Error(ctx, "UtilityAccountCreate: cannot populate usersPlan with plan.Users list elements")
diags.AddWarning("UtilityAccountCreate: cannot populate usersPlan with plan.Users list elements",
fmt.Sprintf("%v", diagsI))
return diags, nil
}
for _, v := range usersPlan {
req := account.AddUserRequest{
AccountID: accountId,
Username: v.UserID.ValueString(),
AccessType: v.AccessType.ValueString(),
}
_, err := c.CloudBroker().Account().AddUser(ctx, req)
if err != nil {
diags.AddWarning("UtilityAccountCreate: Unable to add users",
err.Error())
return diags, nil
}
}
}
if !plan.CPUAllocationParameter.IsUnknown() {
req := account.SetCPUAllocationParameterRequest{
AccountID: accountId,
StrictLoose: plan.CPUAllocationParameter.ValueString(),
}
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req)
if err != nil {
diags.AddWarning("UtilityAccountCreate: Unable to set CPUAllocationParameter ",
err.Error())
return diags, nil
}
}
if !plan.CPUAllocationRatio.IsUnknown() {
req := account.SetCPUAllocationRatioRequest{
AccountID: accountId,
Ratio: plan.CPUAllocationRatio.ValueFloat64(),
}
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req)
if err != nil {
diags.AddWarning("UtilityAccountCreate: Unable to set CPUAllocationRatio ",
err.Error())
return diags, nil
}
}
if !plan.Enable.IsUnknown() && !plan.Enable.ValueBool() {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: accountId,
})
if err != nil {
diags.AddWarning("UtilityAccountCreate: Unable to disable account",
err.Error())
return diags, nil
}
}
if !plan.AvailableTemplates.IsUnknown() {
diagsI := UtilityAccountAvailiableTemplatesUpdate(ctx, plan, plan, true, c)
if diagsI.HasError() {
tflog.Error(ctx, "UtilityAccountCreate: error with utilityAccountAvailiableTemplatesUpdate")
diags.AddWarning("UtilityAccountCreate: cannot populate usersPlan with plan.Users list elements", fmt.Sprintf("%v", diagsI))
return diags, nil
}
}
tflog.Info(ctx, "End UtilityAccountCreate", map[string]any{"account_id": plan.AccountID.ValueInt64()})
return diags, &accountId
}
// UpdateAccount updates account data: account_name, resource_limits, send_access_emails.
// Returns error in case of failures.
func UpdateAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start UpdateAccount", map[string]any{"account_id": accountId})
var diags diag.Diagnostics
var updateNeeded bool
updateReq := account.UpdateRequest{
AccountID: accountId,
}
// check if account_name was changed
if !plan.AccountName.Equal(state.AccountName) {
updateReq.Name = plan.AccountName.ValueString()
updateNeeded = true
}
// check if resource_limits were changed
if !plan.ResourceLimits.Equal(state.ResourceLimits) && !plan.ResourceLimits.IsUnknown() {
tflog.Info(ctx, "UpdateAccount: new ResourceLimits specified", map[string]any{"account_id": accountId})
var resourceLimitsPlan models.ResourceLimitsInAccountResourceModel
diags.Append(plan.ResourceLimits.As(ctx, &resourceLimitsPlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "UpdateAccount: cannot populate ResourceLimits with plan.ResourceLimits object element")
return diags
}
if resourceLimitsPlan.CUM.ValueFloat64() == 0 {
updateReq.MaxMemoryCapacity = -1
} else {
updateReq.MaxMemoryCapacity = int64(resourceLimitsPlan.CUM.ValueFloat64())
}
if resourceLimitsPlan.CUD.ValueFloat64() == 0 {
updateReq.MaxVDiskCapacity = -1
} else {
updateReq.MaxVDiskCapacity = int64(resourceLimitsPlan.CUD.ValueFloat64())
}
if resourceLimitsPlan.CUC.ValueFloat64() == 0 {
updateReq.MaxCPUCapacity = -1
} else {
updateReq.MaxCPUCapacity = int64(resourceLimitsPlan.CUC.ValueFloat64())
}
if resourceLimitsPlan.CUI.ValueFloat64() == 0 {
updateReq.MaxNumPublicIP = -1
} else {
updateReq.MaxNumPublicIP = int64(resourceLimitsPlan.CUI.ValueFloat64())
}
if resourceLimitsPlan.CUNP.ValueFloat64() == 0 {
updateReq.MaxNetworkPeerTransfer = -1
} else {
updateReq.MaxNetworkPeerTransfer = int64(resourceLimitsPlan.CUNP.ValueFloat64())
}
if resourceLimitsPlan.GPUUnits.ValueFloat64() == 0 {
updateReq.GPUUnits = -1
} else {
updateReq.GPUUnits = int64(resourceLimitsPlan.GPUUnits.ValueFloat64())
}
updateNeeded = true
}
// check if send_access_emails was changed
if !plan.SendAccessEmails.Equal(state.SendAccessEmails) && !plan.SendAccessEmails.IsNull() {
updateReq.SendAccessEmails = plan.SendAccessEmails.ValueBool()
updateNeeded = true
}
if !updateNeeded {
tflog.Info(ctx, "UpdateAccount: no general account update is needed because none of account_name, resource_limits or send_access_emails was changed.", map[string]any{
"account_id": plan.Id.ValueString(),
})
return nil
}
// perform account update
tflog.Info(ctx, "UpdateAccount: before calling CloudBroker().Account().Update", map[string]any{
"account_id": accountId,
"req": updateReq,
})
res, err := c.CloudBroker().Account().Update(ctx, updateReq)
if err != nil {
diags.AddError("UpdateAccount: Unable to update account",
err.Error())
return diags
}
tflog.Info(ctx, "UpdateAccount: response from CloudBroker().Account().Update", map[string]any{
"account_id": accountId,
"response": res})
return nil
}
// AddDeleteUsersAccount adds, updates and deletes account users.
// In case of failure returns errors.
func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start AddDeleteUsersAccount", map[string]any{"account_id": accountId})
diags := diag.Diagnostics{}
usersPlan := make([]models.UsersModel, 0, len(plan.Users.Elements()))
diags.Append(plan.Users.ElementsAs(ctx, &usersPlan, true)...)
if diags.HasError() {
tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersPlan with plan.Users list elements")
return diags
}
usersState := make([]models.UsersModel, 0, len(state.Users.Elements()))
diags.Append(state.Users.ElementsAs(ctx, &usersState, true)...)
if diags.HasError() {
tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersState with state.Users list elements")
return diags
}
// define users to be deleted, added and updated
var deletedUsers, addedUsers, updatedUsers []models.UsersModel
for _, user := range usersState {
if !containsUser(usersPlan, user) {
deletedUsers = append(deletedUsers, user)
}
}
for _, user := range usersPlan {
if !containsUser(usersState, user) {
addedUsers = append(addedUsers, user)
} else if isChangedUser(usersState, user) {
updatedUsers = append(updatedUsers, user)
}
}
// delete users
if len(deletedUsers) == 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: no users need to be deleted", map[string]any{"account_id": accountId})
}
if len(deletedUsers) > 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: users need to be deleted", map[string]any{
"accountId": accountId,
"deletedUsers": deletedUsers})
for _, user := range deletedUsers {
delUserReq := account.DeleteUserRequest{
AccountID: accountId,
UserName: user.UserID.ValueString(),
RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false
}
tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq})
res, err := c.CloudBroker().Account().DeleteUser(ctx, delUserReq)
tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "response": res})
if err != nil {
diags.AddError(
"AddDeleteUsersAccount: cannot delete user from account",
err.Error())
}
}
}
// add users
if len(addedUsers) == 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: no users need to be added", map[string]any{"account_id": accountId})
}
if len(addedUsers) > 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: users need to be added", map[string]any{"account_id": accountId})
for _, user := range addedUsers {
addUserReq := account.AddUserRequest{
AccountID: accountId,
Username: user.UserID.ValueString(),
AccessType: user.AccessType.ValueString(),
}
tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().AddUser", map[string]any{
"account_id": accountId,
"addUserReq": addUserReq})
res, err := c.CloudBroker().Account().AddUser(ctx, addUserReq)
if err != nil {
diags.AddError("AddDeleteUsersAccount: Unable to add users to account",
err.Error())
}
tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().AddUser", map[string]any{
"account_id": accountId,
"response": res})
}
}
// update users
if len(updatedUsers) == 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: no users need to be updated", map[string]any{"account_id": accountId})
}
if len(updatedUsers) > 0 {
tflog.Info(ctx, "AddDeleteUsersAccount: users need to be updated", map[string]any{"account_id": accountId})
for _, user := range updatedUsers {
updUserReq := account.UpdateUserRequest{
AccountID: accountId,
UserID: user.UserID.ValueString(),
AccessType: user.AccessType.ValueString(),
}
tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().UpdateUser", map[string]any{
"account_id": accountId,
"updUserReq": updUserReq})
res, err := c.CloudBroker().Account().UpdateUser(ctx, updUserReq)
if err != nil {
diags.AddError("AddDeleteUsersAccount: Unable to update users",
err.Error())
}
tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().UpdateUser", map[string]any{
"account_id": accountId,
"response": res})
}
}
return diags
}
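// containsUser reports whether a user with the same user_id as target is present in users.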
func containsUser(users []models.UsersModel, target models.UsersModel) bool {
for _, user := range users {
if target.UserID.Equal(user.UserID) {
return true
}
}
return false
}
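// isChangedUser reports whether users contains a user with the same user_id as target but a different access_type.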
func isChangedUser(users []models.UsersModel, target models.UsersModel) bool {
for _, user := range users {
if user.UserID.Equal(target.UserID) && !user.AccessType.Equal(target.AccessType) {
return true
}
}
return false
}
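// UtilityAccountCPUParameterUpdate sets the CPU allocation parameter (strict/loose) for the account.
// Returns errors in case of failures.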
func UtilityAccountCPUParameterUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start utilityAccountCPUParameterUpdate", map[string]any{"account_id": plan.AccountID})
diags := diag.Diagnostics{}
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{
AccountID: accountID,
StrictLoose: plan.CPUAllocationParameter.ValueString(),
})
if err != nil {
diags.AddError("utilityAccountCPUParameterUpdate: Unable to update CPUAllocationParameter",
err.Error())
return diags
}
return diags
}
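// UtilityAccountCPURatioUpdate sets the CPU allocation ratio for the account.
// Returns errors in case of failures.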
func UtilityAccountCPURatioUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start utilityAccountCPURatioUpdate", map[string]any{"account_id": plan.AccountID})
diags := diag.Diagnostics{}
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{
AccountID: accountID,
Ratio: plan.CPUAllocationRatio.ValueFloat64(),
})
if err != nil {
diags.AddError("utilityAccountCPURatioUpdate: Unable to update CPUAllocationRatio",
err.Error())
return diags
}
return diags
}
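// UtilityAccountAvailiableTemplatesUpdate grants and revokes access to image templates for the account.
// After creation it grants access to all templates from the plan; on update it compares plan and state
// and revokes/grants access to the difference. Returns errors in case of failures.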
func UtilityAccountAvailiableTemplatesUpdate(ctx context.Context, state, plan *models.ResourceAccountModel, afterCreate bool, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start utilityAccountAvailiableTemplatesUpdate", map[string]any{"account_id": plan.AccountID})
diags := diag.Diagnostics{}
if afterCreate {
imageIds := make([]uint64, 0, len(plan.AvailableTemplates.Elements()))
diags.Append(plan.AvailableTemplates.ElementsAs(ctx, &imageIds, true)...)
if diags.HasError() {
tflog.Error(ctx, "utilityAccountAvailiableTemplatesUpdate: cannot populate imageIds with plan.AvailableTemplates list elements")
return diags
}
if len(imageIds) == 0 {
diags.AddError(
"available_templates list is empty, no images to grant access to",
"len(imageIds) == 0",
)
return diags
}
if err := ic.ExistImages(ctx, imageIds, c); err != nil {
diags.AddError(
fmt.Sprintf("cannot grant access for available templates: %s", err.Error()),
err.Error(),
)
return diags
}
tflog.Info(ctx, "utilityAccountAvailiableTemplatesUpdate: image ids to grant access", map[string]any{"image_ids": imageIds})
req := account.GrantAccessTemplatesRequest{
AccountID: uint64(state.AccountID.ValueInt64()),
ImageIDs: imageIds,
}
_, err := c.CloudBroker().Account().GrantAccessTemplates(ctx, req)
if err != nil {
diags.AddError(
fmt.Sprintf("cannot grant access for available templates: %s", err.Error()),
err.Error(),
)
return diags
}
return diags
}
var oldSet, newSet []int
diags.Append(plan.AvailableTemplates.ElementsAs(ctx, &newSet, true)...)
diags.Append(state.AvailableTemplates.ElementsAs(ctx, &oldSet, true)...)
if diags.HasError() {
tflog.Error(ctx, "utilityAccountAvailiableTemplatesUpdate: cannot populate newSet or oldSet with AvailableTemplates")
return diags
}
revokeAT := setDifference(oldSet, newSet)
if len(revokeAT) > 0 {
imageIds := revokeAT
if err := ic.ExistImages(ctx, imageIds, c); err != nil {
diags.AddError(fmt.Sprintf("cannot revoke access for available templates: %s", err), err.Error())
return diags
}
req := account.RevokeAccessTemplatesRequest{
AccountID: uint64(state.AccountID.ValueInt64()),
ImageIDs: imageIds,
}
_, err := c.CloudBroker().Account().RevokeAccessTemplates(ctx, req)
if err != nil {
diags.AddError("utilityAccountAvailiableTemplatesUpdate: error with RevokeAccessTemplates", err.Error())
return diags
}
}
addedAT := setDifference(newSet, oldSet)
if len(addedAT) > 0 {
imageIds := addedAT
if err := ic.ExistImages(ctx, imageIds, c); err != nil {
diags.AddError(fmt.Sprintf("cannot grant access for available templates: %s", err.Error()), err.Error())
return diags
}
req := account.GrantAccessTemplatesRequest{
AccountID: uint64(plan.AccountID.ValueInt64()),
ImageIDs: imageIds,
}
_, err := c.CloudBroker().Account().GrantAccessTemplates(ctx, req)
if err != nil {
diags.AddError("utilityAccountAvailiableTemplatesUpdate: error with GrantAccessTemplates", err.Error())
return diags
}
}
return diags
}
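// setDifference returns the elements of set that are not present in check, converted to uint64.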
func setDifference(set, check []int) []uint64 {
mapCheck := make(map[int]struct{})
for _, id := range check {
mapCheck[id] = struct{}{}
}
var diff []uint64
for _, id := range set {
if _, ok := mapCheck[id]; !ok {
diff = append(diff, uint64(id))
}
}
return diff
}
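// UtilityAccountComputeFeaturesUpdate updates the list of compute features for the account.
// Returns errors in case of failures.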
func UtilityAccountComputeFeaturesUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
var compFeatures []string
diags.Append(plan.ComputeFeatures.ElementsAs(ctx, &compFeatures, true)...)
if diags.HasError() {
tflog.Error(ctx, "utilityAccountComputeFeaturesUpdate: cannot populate compFeatures with plan.ComputeFeatures object element")
return diags
}
req := account.UpdateComputeFeaturesRequest{
AccountID: accountID,
ComputeFeatures: compFeatures,
}
_, err := c.CloudBroker().Account().UpdateComputeFeatures(ctx, req)
if err != nil {
diags.AddError("utilityAccountComputeFeaturesUpdate: error with CloudBroker().Account().UpdateComputeFeatures", err.Error())
return diags
}
return diags
}

@ -0,0 +1,91 @@
package audit
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAudit{}
)
func NewDataSourceAudit() datasource.DataSource {
return &dataSourceAudit{}
}
// dataSourceAudit is the data source implementation.
type dataSourceAudit struct {
client *decort.DecortClient
}
func (d *dataSourceAudit) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAudit
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAudit: Error get state")
return
}
auditGuid := state.AuditGuid.ValueString()
tflog.Info(ctx, "Read dataSourceAudit: got state successfully", map[string]any{"audit_guid": auditGuid})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAudit: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAudit: set timeouts successfully", map[string]any{
"audit_guid": auditGuid,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AuditDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAudit: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAudit: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAudit", map[string]any{"audit_guid": auditGuid})
}
func (d *dataSourceAudit) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAudit(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAudit) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_audit"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAudit) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAudit")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAudit successfully")
}

@ -0,0 +1,91 @@
package audit
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAuditLinkedJobs{}
)
func NewDataSourceAuditLinkedJobs() datasource.DataSource {
return &dataSourceAuditLinkedJobs{}
}
// dataSourceAuditLinkedJobs is the data source implementation.
type dataSourceAuditLinkedJobs struct {
client *decort.DecortClient
}
func (d *dataSourceAuditLinkedJobs) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAuditLinkedJobs
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error get state")
return
}
auditGuid := state.AuditGuid.ValueString()
tflog.Info(ctx, "Read dataSourceAuditLinkedJobs: got state successfully", map[string]any{"audit_guid": auditGuid})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAuditLinkedJobs: set timeouts successfully", map[string]any{
"audit_guid": auditGuid,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AuditLinkedJobsDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAuditLinkedJobs", map[string]any{"audit_guid": auditGuid})
}
func (d *dataSourceAuditLinkedJobs) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAuditLinkedJobs(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAuditLinkedJobs) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_audit_linked_jobs"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAuditLinkedJobs) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAuditLinkedJobs")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAuditLinkedJobs successfully")
}

@ -0,0 +1,89 @@
package audit
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceAuditList{}
)
func NewDataSourceAuditList() datasource.DataSource {
return &dataSourceAuditList{}
}
// dataSourceAuditList is the data source implementation.
type dataSourceAuditList struct {
client *decort.DecortClient
}
func (d *dataSourceAuditList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceAuditList
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceAuditList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceAuditList: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.AuditListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditList: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceAuditList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceAuditList")
}
func (d *dataSourceAuditList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceAuditList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceAuditList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_audit_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceAuditList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceAuditList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceAuditList successfully")
}

@ -0,0 +1,50 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities"
)
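// AuditDataSource flattens data source for audit.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.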
func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AuditDataSource")
diags := diag.Diagnostics{}
auditGuid := state.AuditGuid.ValueString()
recordAudit, diags := utilities.AuditDataSourceCheckPresence(ctx, auditGuid, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.AuditDataSource: before flatten", map[string]any{"audit_guid": auditGuid})
*state = models.DataSourceAudit{
AuditGuid: state.AuditGuid,
Timeouts: state.Timeouts,
Apitask: types.StringValue(recordAudit.Apitask),
Arguments: types.StringValue(recordAudit.Arguments),
Call: types.StringValue(recordAudit.Call),
GUID: types.StringValue(recordAudit.GUID),
ID: types.StringValue(recordAudit.GUID),
Kwargs: types.StringValue(recordAudit.Kwargs),
RemoteAddr: types.StringValue(recordAudit.RemoteAddr),
ResponseTime: types.Float64Value(recordAudit.ResponseTime),
Result: types.StringValue(recordAudit.Result),
StatusCode: types.Int64Value(int64(recordAudit.StatusCode)),
Tags: types.StringValue(recordAudit.Tags),
Timestamp: types.Float64Value(recordAudit.Timestamp),
TimestampEnd: types.Float64Value(recordAudit.TimestampEnd),
User: types.StringValue(recordAudit.User),
}
tflog.Info(ctx, "End flattens.AuditDataSource", map[string]any{"audit_guid": auditGuid})
return nil
}

@ -0,0 +1,64 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities"
)
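// AuditLinkedJobsDataSource flattens data source for audit linked jobs.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.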
func AuditLinkedJobsDataSource(ctx context.Context, state *models.DataSourceAuditLinkedJobs, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AuditLinkedJobsDataSource")
diags := diag.Diagnostics{}
auditGuid := state.AuditGuid.ValueString()
recordAudit, diags := utilities.AuditLinkedJobsDataSourceCheckPresence(ctx, auditGuid, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.AuditLinkedJobsDataSource: before flatten", map[string]any{"audit_guid": auditGuid})
*state = models.DataSourceAuditLinkedJobs{
AuditGuid: state.AuditGuid,
Timeouts: state.Timeouts,
ID: types.StringValue(uuid.New().String()),
Items: flattenLinkedJobs(ctx, *recordAudit),
}
tflog.Info(ctx, "End flattens.AuditLinkedJobsDataSource", map[string]any{"audit_guid": auditGuid})
return nil
}
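// flattenLinkedJobs flattens audit.ItemLinkedJobs items into a slice of LinkedJob models.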
func flattenLinkedJobs(ctx context.Context, linkedJobs []audit.ItemLinkedJobs) []models.LinkedJob {
tflog.Info(ctx, "Start flattenLinkedJobs")
res := make([]models.LinkedJob, 0, len(linkedJobs))
for _, item := range linkedJobs {
temp := models.LinkedJob{
CMD: types.StringValue(item.CMD),
GUID: types.StringValue(item.GUID),
NID: types.Int64Value(int64(item.NID)),
State: types.StringValue(item.State),
TimeCreate: types.Int64Value(int64(item.TimeCreate)),
TimeStart: types.Int64Value(int64(item.TimeStart)),
TimeStop: types.Int64Value(int64(item.TimeStop)),
Timeout: types.Int64Value(int64(item.Timeout)),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenLinkedJobs")
return res
}

@ -0,0 +1,67 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities"
)
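// AuditListDataSource flattens data source for audit list.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.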
func AuditListDataSource(ctx context.Context, state *models.DataSourceAuditList, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.AuditListDataSource")
diags := diag.Diagnostics{}
recordAuditList, diags := utilities.AuditListDataSourceCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.AuditListDataSource: before flatten")
*state = models.DataSourceAuditList{
TimestampAt: state.TimestampAt,
TimestampTo: state.TimestampTo,
User: state.User,
Call: state.Call,
MinStatusCode: state.MinStatusCode,
MaxStatusCode: state.MaxStatusCode,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
EntryCount: types.Int64Value(int64(recordAuditList.EntryCount)),
Items: flattenAuditItems(ctx, recordAuditList.Data),
}
tflog.Info(ctx, "End flattens.AuditListDataSource")
return nil
}
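// flattenAuditItems flattens audit.ItemAudit items into a slice of ItemAudit models.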
func flattenAuditItems(ctx context.Context, auditList []audit.ItemAudit) []models.ItemAudit {
tflog.Info(ctx, "Start flattenAuditItems")
res := make([]models.ItemAudit, 0, len(auditList))
for _, item := range auditList {
temp := models.ItemAudit{
Call: types.StringValue(item.Call),
GUID: types.StringValue(item.GUID),
ResponseTime: types.Float64Value(item.ResponseTime),
StatusCode: types.Int64Value(int64(item.StatusCode)),
Timestamp: types.Float64Value(item.Timestamp),
User: types.StringValue(item.User),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenAuditItems")
return res
}

@ -0,0 +1,28 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAudit struct {
//required field
AuditGuid types.String `tfsdk:"audit_guid"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
//response field
Apitask types.String `tfsdk:"apitask"`
Arguments types.String `tfsdk:"args"`
Call types.String `tfsdk:"call"`
GUID types.String `tfsdk:"guid"`
ID types.String `tfsdk:"id"`
Kwargs types.String `tfsdk:"kwargs"`
RemoteAddr types.String `tfsdk:"remote_addr"`
ResponseTime types.Float64 `tfsdk:"responsetime"`
Result types.String `tfsdk:"result"`
StatusCode types.Int64 `tfsdk:"status_code"`
Tags types.String `tfsdk:"tags"`
Timestamp types.Float64 `tfsdk:"timestamp"`
TimestampEnd types.Float64 `tfsdk:"timestamp_end"`
User types.String `tfsdk:"user"`
}

@ -0,0 +1,27 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAuditLinkedJobs struct {
//required field
AuditGuid types.String `tfsdk:"audit_guid"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
//response field
ID types.String `tfsdk:"id"`
Items []LinkedJob `tfsdk:"items"`
}
type LinkedJob struct {
CMD types.String `tfsdk:"cmd"`
GUID types.String `tfsdk:"guid"`
NID types.Int64 `tfsdk:"nid"`
State types.String `tfsdk:"state"`
TimeCreate types.Int64 `tfsdk:"time_create"`
TimeStart types.Int64 `tfsdk:"time_start"`
TimeStop types.Int64 `tfsdk:"time_stop"`
Timeout types.Int64 `tfsdk:"timeout"`
}

@ -0,0 +1,33 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceAuditList struct {
//request field
TimestampAt types.Int64 `tfsdk:"timestamp_at"`
TimestampTo types.Int64 `tfsdk:"timestamp_to"`
User types.String `tfsdk:"user"`
Call types.String `tfsdk:"call"`
MinStatusCode types.Int64 `tfsdk:"min_status_code"`
MaxStatusCode types.Int64 `tfsdk:"max_status_code"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
//response field
EntryCount types.Int64 `tfsdk:"entry_count"`
Items []ItemAudit `tfsdk:"items"`
}
type ItemAudit struct {
Call types.String `tfsdk:"call"`
GUID types.String `tfsdk:"guid"`
ResponseTime types.Float64 `tfsdk:"responsetime"`
StatusCode types.Int64 `tfsdk:"status_code"`
Timestamp types.Float64 `tfsdk:"timestamp"`
User types.String `tfsdk:"user"`
}

@ -0,0 +1,55 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAudit() map[string]schema.Attribute {
return map[string]schema.Attribute{
"audit_guid": schema.StringAttribute{
Required: true,
},
"apitask": schema.StringAttribute{
Computed: true,
},
"args": schema.StringAttribute{
Computed: true,
},
"call": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"kwargs": schema.StringAttribute{
Computed: true,
},
"remote_addr": schema.StringAttribute{
Computed: true,
},
"responsetime": schema.Float64Attribute{
Computed: true,
},
"result": schema.StringAttribute{
Computed: true,
},
"status_code": schema.Int64Attribute{
Computed: true,
},
"tags": schema.StringAttribute{
Computed: true,
},
"timestamp": schema.Float64Attribute{
Computed: true,
},
"timestamp_end": schema.Float64Attribute{
Computed: true,
},
"user": schema.StringAttribute{
Computed: true,
},
"id": schema.StringAttribute{
Computed: true,
},
}
}

@ -0,0 +1,47 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAuditLinkedJobs() map[string]schema.Attribute {
return map[string]schema.Attribute{
"audit_guid": schema.StringAttribute{
Required: true,
},
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"cmd": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"nid": schema.Int64Attribute{
Computed: true,
},
"state": schema.StringAttribute{
Computed: true,
},
"time_create": schema.Int64Attribute{
Computed: true,
},
"time_start": schema.Int64Attribute{
Computed: true,
},
"time_stop": schema.Int64Attribute{
Computed: true,
},
"timeout": schema.Int64Attribute{
Computed: true,
},
},
},
},
}
}

@ -0,0 +1,65 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceAuditList() map[string]schema.Attribute {
return map[string]schema.Attribute{
"timestamp_at": schema.Int64Attribute{
Optional: true,
},
"timestamp_to": schema.Int64Attribute{
Optional: true,
},
"user": schema.StringAttribute{
Optional: true,
},
"call": schema.StringAttribute{
Optional: true,
},
"min_status_code": schema.Int64Attribute{
Optional: true,
},
"max_status_code": schema.Int64Attribute{
Optional: true,
},
"sort_by": schema.StringAttribute{
Optional: true,
},
"page": schema.Int64Attribute{
Optional: true,
},
"size": schema.Int64Attribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"call": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"responsetime": schema.Float64Attribute{
Computed: true,
},
"status_code": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Float64Attribute{
Computed: true,
},
"user": schema.StringAttribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

@ -0,0 +1,27 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit"
)
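// AuditDataSourceCheckPresence gets the audit record with the given GUID from the platform.
// Returns errors in case of failures.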
func AuditDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.RecordAudit, diag.Diagnostics) {
tflog.Info(ctx, fmt.Sprintf("AuditDataSourceCheckPresence: Get info about audit with ID - %v", auditGuid))
diags := diag.Diagnostics{}
recordAudit, err := c.CloudBroker().Audit().Get(ctx, audit.GetRequest{AuditGuid: auditGuid})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about audit with ID %v", auditGuid), err.Error())
return nil, diags
}
tflog.Info(ctx, "AuditDataSourceCheckPresence: response from CloudBroker().Audit().Get", map[string]any{"audit_guid": auditGuid, "response": recordAudit})
return recordAudit, nil
}

@ -0,0 +1,27 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit"
)
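// AuditLinkedJobsDataSourceCheckPresence gets the list of jobs linked to the audit with the given GUID.
// Returns errors in case of failures.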
func AuditLinkedJobsDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.ListLinkedJobs, diag.Diagnostics) {
tflog.Info(ctx, fmt.Sprintf("AuditLinkedJobsDataSourceCheckPresence: Get info about audit linked jobs with ID - %v", auditGuid))
diags := diag.Diagnostics{}
linkedJobsList, err := c.CloudBroker().Audit().LinkedJobs(ctx, audit.LinkedJobsRequest{AuditGuid: auditGuid})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about audit linked jobs with ID %v", auditGuid), err.Error())
return nil, diags
}
tflog.Info(ctx, "AuditLinkedJobsDataSourceCheckPresence: response from CloudBroker().Audit().LinkedJobs", map[string]any{"audit_guid": auditGuid, "response": linkedJobsList})
return linkedJobsList, nil
}

@ -0,0 +1,58 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models"
)
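// AuditListDataSourceCheckPresence builds the list request from the state filters and gets the list of audits from the platform.
// Returns errors in case of failures.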
func AuditListDataSourceCheckPresence(ctx context.Context, state *models.DataSourceAuditList, c *decort.DecortClient) (*audit.ListAudits, diag.Diagnostics) {
tflog.Info(ctx, "AuditListDataSourceCheckPresence: Get info about audit list")
diags := diag.Diagnostics{}
req := audit.ListRequest{}
if !state.TimestampAt.IsNull() {
req.TimestampAt = uint64(state.TimestampAt.ValueInt64())
}
if !state.TimestampTo.IsNull() {
req.TimestampTo = uint64(state.TimestampTo.ValueInt64())
}
if !state.User.IsNull() {
req.User = state.User.ValueString()
}
if !state.Call.IsNull() {
req.Call = state.Call.ValueString()
}
if !state.MaxStatusCode.IsNull() {
req.MaxStatusCode = uint64(state.MaxStatusCode.ValueInt64())
}
if !state.MinStatusCode.IsNull() {
req.MinStatusCode = uint64(state.MinStatusCode.ValueInt64())
}
if !state.SortBy.IsNull() {
req.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
req.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
req.Size = uint64(state.Size.ValueInt64())
}
recordAuditList, err := c.CloudBroker().Audit().List(ctx, req)
if err != nil {
diags.AddError("Cannot get info about audit list", err.Error())
return nil, diags
}
tflog.Info(ctx, "AuditListDataSourceCheckPresence: response from CloudBroker().Audit().List", map[string]any{"response": recordAuditList})
return recordAuditList, nil
}

@ -0,0 +1,305 @@
// Input checks
package ic
import (
"context"
"fmt"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
grid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
)
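// ExistRG checks if a resource group with the given ID exists on the platform.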
func ExistRG(ctx context.Context, rgId uint64, c *decort.DecortClient) error {
req := rg.ListRequest{
ByID: rgId,
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return err
}
if len(rgList.Data) == 0 {
return fmt.Errorf("RG with id %v not found", rgId)
}
return nil
}
func ExistAccount(ctx context.Context, accountId uint64, c *decort.DecortClient) error {
req := account.ListRequest{
ByID: accountId,
}
accountList, err := c.CloudBroker().Account().List(ctx, req)
if err != nil {
return err
}
if len(accountList.Data) == 0 {
return fmt.Errorf("account with id %d not found", accountId)
}
return nil
}
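// ExistAccounts checks that every account ID from accountIds exists on the platform.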
func ExistAccounts(ctx context.Context, accountIds []uint64, c *decort.DecortClient) error {
req := account.ListRequest{}
accountList, err := c.CloudBroker().Account().List(ctx, req)
if err != nil {
return err
}
if len(accountList.Data) == 0 {
return fmt.Errorf("you have not been granted access to any account")
}
notFound := make([]uint64, 0, len(accountIds))
for _, accID := range accountIds {
found := false
for _, acc := range accountList.Data {
if accID == acc.ID {
found = true
break
}
}
if !found {
notFound = append(notFound, accID)
}
}
if len(notFound) > 0 {
return fmt.Errorf("accounts with ids %v not found", notFound)
}
return nil
}
func ExistRGs(ctx context.Context, rgIDs []uint64, c *decort.DecortClient) error {
req := rg.ListRequest{
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return err
}
if len(rgList.Data) == 0 {
return fmt.Errorf("you have not been granted access to any resource group")
}
notFound := make([]uint64, 0, len(rgIDs))
for _, rgID := range rgIDs {
found := false
for _, rg := range rgList.Data {
if rgID == rg.ID {
found = true
break
}
}
if !found {
notFound = append(notFound, rgID)
}
}
if len(notFound) > 0 {
return fmt.Errorf("RGs with ids %v not found", notFound)
}
return nil
}
func ExistLB(ctx context.Context, lbId uint64, c *decort.DecortClient) error {
req := lb.ListRequest{
ByID: lbId,
}
lbList, err := c.CloudBroker().LB().List(ctx, req)
if err != nil {
return err
}
if len(lbList.Data) == 0 {
return fmt.Errorf("LB with ID %v not found", lbId)
}
return nil
}
func ExistLBFrontend(ctx context.Context, lbId uint64, fName string, c *decort.DecortClient) error {
lbRec, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId})
if err != nil {
return err
}
frontends := lbRec.Frontends
for _, f := range frontends {
if f.Name == fName {
return nil
}
}
return fmt.Errorf("frontend with name %v not found", fName)
}
func ExistLBBackend(ctx context.Context, lbId uint64, bName string, c *decort.DecortClient) error {
lbRec, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId})
if err != nil {
return err
}
backends := lbRec.Backends
for _, b := range backends {
if b.Name == bName {
return nil
}
}
return fmt.Errorf("backend with name %v not found", bName)
}
func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *decort.DecortClient) error {
if extNetId == 0 {
return nil
}
req := extnet.ListRequest{
ByID: extNetId,
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(extNetList.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found", extNetId)
}
return nil
}
func ExistVinsInLb(ctx context.Context, vinsId uint64, c *decort.DecortClient) error {
if vinsId == 0 {
return nil
}
req := vins.ListRequest{
ByID: vinsId,
}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return err
}
if len(vinsList.Data) == 0 {
return fmt.Errorf("VINS with ID %v not found", vinsId)
}
return nil
}
func ExistExtNetInVins(ctx context.Context, extNetId int, c *decort.DecortClient) error {
if extNetId == 0 || extNetId == -1 {
return nil
}
req := extnet.ListRequest{
ByID: uint64(extNetId),
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(extNetList.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found", extNetId)
}
return nil
}
func ExistGID(ctx context.Context, gid uint64, c *decort.DecortClient) error {
req := grid.ListRequest{}
gridList, err := c.CloudBroker().Grid().List(ctx, req)
if err != nil {
return err
}
for _, grid := range gridList.Data {
if grid.GID == gid {
return nil
}
}
return fmt.Errorf("GID with id %v not found", gid)
}
func ExistVins(ctx context.Context, vinsId uint64, c *decort.DecortClient) error {
req := vins.ListRequest{
ByID: vinsId,
IncludeDeleted: false,
}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return err
}
if len(vinsList.Data) == 0 {
return fmt.Errorf("vins with ID %v not found", vinsId)
}
return nil
}
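// ExistImages checks that every image ID from imageIDs exists on the platform.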
func ExistImages(ctx context.Context, imageIDs []uint64, c *decort.DecortClient) error {
req := image.ListRequest{}
listImages, err := c.CloudBroker().Image().List(ctx, req)
if err != nil {
return err
}
if len(listImages.Data) == 0 {
return fmt.Errorf("you have not been granted access to any images")
}
notFound := make([]uint64, 0, len(imageIDs))
for _, imageID := range imageIDs {
found := false
for _, image := range listImages.Data {
if imageID == image.ID {
found = true
break
}
}
if !found {
notFound = append(notFound, imageID)
}
}
if len(notFound) > 0 {
return fmt.Errorf("images with ids %v not found", notFound)
}
return nil
}

@ -0,0 +1,91 @@
package lb
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceLB{}
)
func NewDataSourceLB() datasource.DataSource {
return &dataSourceLB{}
}
// dataSourceLB is the data source implementation.
type dataSourceLB struct {
client *decort.DecortClient
}
func (d *dataSourceLB) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceLB
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLB: Error get state")
return
}
lbID := uint64(state.ID.ValueInt64())
tflog.Info(ctx, "Read dataSourceLB: got state successfully", map[string]any{"lb_id": lbID})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLB: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceLB: set timeouts successfully", map[string]any{
"lb_id": lbID,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.LBDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLB: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLB: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceLB", map[string]any{"lb_id": lbID})
}
func (d *dataSourceLB) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceLB(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceLB) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_lb"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceLB) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceLB")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceLB successfully")
}

@ -0,0 +1,88 @@
package lb
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceLBList{}
)
func NewDataSourceLBList() datasource.DataSource {
return &dataSourceLBList{}
}
// dataSourceLBList is the data source implementation.
type dataSourceLBList struct {
client *decort.DecortClient
}
func (d *dataSourceLBList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceLBList
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBList: Error get state")
return
}
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceLBList: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.LBListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBList: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceLBList")
}
func (d *dataSourceLBList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceLBList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceLBList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_lb_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceLBList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceLBList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceLBList successfully")
}

@ -0,0 +1,89 @@
package lb
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceLBListDeleted{}
)
func NewDataSourceLBListDeleted() datasource.DataSource {
return &dataSourceLBListDeleted{}
}
// dataSourceLBListDeleted is the data source implementation.
type dataSourceLBListDeleted struct {
client *decort.DecortClient
}
func (d *dataSourceLBListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceLBListDeleted
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBListDeleted: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceLBListDeleted: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBListDeleted: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceLBListDeleted: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.LBListDeletedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBListDeleted: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceLBListDeleted: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceLBListDeleted")
}
func (d *dataSourceLBListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceLBListDeleted(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceLBListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_lb_list_deleted"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceLBListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceLBListDeleted")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceLBListDeleted successfully")
}

@ -0,0 +1,219 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
// LBDataSource flattens data source for lb.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func LBDataSource(ctx context.Context, state *models.DataSourceLB, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBDataSource")
diags := diag.Diagnostics{}
lbID := uint64(state.LBID.ValueInt64())
recordLB, diags := utilities.LBDataSourceCheckPresence(ctx, lbID, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.LBDataSource: before flatten", map[string]any{"lb_id": lbID})
acl, _ := json.Marshal(recordLB.ACL)
*state = models.DataSourceLB{
LBID: state.LBID,
Timeouts: state.Timeouts,
HAMode: types.BoolValue(recordLB.HAMode),
ACL: types.StringValue(string(acl)),
BackendHAIP: types.StringValue(recordLB.BackendHAIP),
Backends: flattenBackendsInLB(ctx, recordLB.Backends),
CKey: types.StringValue(recordLB.CKey),
Description: types.StringValue(recordLB.Description),
DPAPIUser: types.StringValue(recordLB.DPAPIUser),
DPAPIPassword: types.StringValue(recordLB.DPAPIPassword),
ExtNetID: types.Int64Value(int64(recordLB.ExtNetID)),
FrontendHAIP: types.StringValue(recordLB.FrontendHAIP),
Frontends: flattenFrontendsInLB(ctx, recordLB.Frontends),
GID: types.Int64Value(int64(recordLB.GID)),
GUID: types.Int64Value(int64(recordLB.GUID)),
ID: types.Int64Value(int64(recordLB.ID)),
ImageID: types.Int64Value(int64(recordLB.ImageID)),
ManagerId: types.Int64Value(int64(recordLB.ManagerId)),
ManagerType: types.StringValue(recordLB.ManagerType),
Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordLB.Meta),
Milestones: types.Int64Value(int64(recordLB.Milestones)),
Name: types.StringValue(recordLB.Name),
PartK8s: types.BoolValue(recordLB.PartK8s),
PrimaryNode: flattenNodeInLB(ctx, recordLB.PrimaryNode),
RGID: types.Int64Value(int64(recordLB.RGID)),
SecondaryNode: flattenNodeInLB(ctx, recordLB.SecondaryNode),
Status: types.StringValue(recordLB.Status),
TechStatus: types.StringValue(recordLB.TechStatus),
UserManaged: types.BoolValue(recordLB.UserManaged),
VINSID: types.Int64Value(int64(recordLB.VINSID)),
}
tflog.Info(ctx, "End flattens.LBDataSource", map[string]any{"lb_id": state.ID.ValueInt64()})
return nil
}
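// flattenBackendsInLB flattens lb.ItemBackend items into a types.List of backend objects.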
func flattenBackendsInLB(ctx context.Context, backends []lb.ItemBackend) types.List {
tflog.Info(ctx, "Start flattenBackendsInLB")
tempSlice := make([]types.Object, 0, len(backends))
for _, backend := range backends {
b := models.ItemBackendModel{
Algorithm: types.StringValue(backend.Algorithm),
GUID: types.StringValue(backend.GUID),
Name: types.StringValue(backend.Name),
ServerDefaultSettings: flattenServersSettings(ctx, backend.ServerDefaultSettings),
Servers: flattenServersInLB(ctx, backend.Servers),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemBackend, b)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenBackendsInLB struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemBackend}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenBackendsInLB", diags))
}
tflog.Info(ctx, "End flattenBackendsInLB")
return res
}
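// flattenFrontendsInLB flattens lb.ItemFrontend items into a types.List of frontend objects.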
func flattenFrontendsInLB(ctx context.Context, frontends []lb.ItemFrontend) types.List {
tflog.Info(ctx, "Start flattenFrontendsInLB")
tempSlice := make([]types.Object, 0, len(frontends))
for _, frontend := range frontends {
b := models.ItemFrontendModel{
Backend: types.StringValue(frontend.Backend),
Bindings: flattenBindingsInLB(ctx, frontend.Bindings),
GUID: types.StringValue(frontend.GUID),
Name: types.StringValue(frontend.Name),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemFrontend, b)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenFrontendsInLB struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemFrontend}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenFrontendsInLB", diags))
}
tflog.Info(ctx, "End flattenFrontendsInLB")
return res
}
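// flattenNodeInLB converts an SDK lb.Node into a types.Object described by models.ItemNode.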
func flattenNodeInLB(ctx context.Context, node lb.Node) types.Object {
tflog.Info(ctx, "Start flattenNodeInLB")
n := models.RecordNodeModel{
BackendIP: types.StringValue(node.BackendIP),
ComputeID: types.Int64Value(int64(node.ComputeID)),
FrontendIP: types.StringValue(node.FrontendIP),
GUID: types.StringValue(node.GUID),
MGMTIP: types.StringValue(node.MGMTIP),
NetworkID: types.Int64Value(int64(node.NetworkID)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemNode, n)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenNodeInLB struct to obj", diags))
}
tflog.Info(ctx, "End flattenNodeInLB")
return obj
}
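// flattenServersSettings converts SDK server settings into a types.Object described by models.ItemServerSettings.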
func flattenServersSettings(ctx context.Context, settings lb.ServerSettings) types.Object {
tflog.Info(ctx, "Start flattenServersSettings")
s := models.RecordServerSettingsModel{
Inter: types.Int64Value(int64(settings.Inter)),
GUID: types.StringValue(settings.GUID),
DownInter: types.Int64Value(int64(settings.DownInter)),
Rise: types.Int64Value(int64(settings.Rise)),
Fall: types.Int64Value(int64(settings.Fall)),
SlowStart: types.Int64Value(int64(settings.SlowStart)),
MaxConn: types.Int64Value(int64(settings.MaxConn)),
MaxQueue: types.Int64Value(int64(settings.MaxQueue)),
Weight: types.Int64Value(int64(settings.Weight)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemServerSettings, s)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenServersSettings struct to obj", diags))
}
tflog.Info(ctx, "End flattenServersSettings")
return obj
}
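// flattenServersInLB converts the SDK backend servers into a types.List of objects described by models.ItemServers.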
func flattenServersInLB(ctx context.Context, servers []lb.ItemServer) types.List {
tflog.Info(ctx, "Start flattenServersInLBBackend")
tempSlice := make([]types.Object, 0, len(servers))
for _, server := range servers {
s := models.RecordServerModel{
Address: types.StringValue(server.Address),
Check: types.StringValue(server.Check),
GUID: types.StringValue(server.GUID),
Name: types.StringValue(server.Name),
Port: types.Int64Value(int64(server.Port)),
ServerSettings: flattenServersSettings(ctx, server.ServerSettings),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemServers, s)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenServersInLBBackend struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemServers}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenServersInLBBackend", diags))
}
tflog.Info(ctx, "End flattenServersInLBBackend")
return res
}
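// flattenBindingsInLB converts the SDK frontend bindings into a types.List of objects described by models.ItemBindings.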
func flattenBindingsInLB(ctx context.Context, bindings []lb.ItemBinding) types.List {
tflog.Info(ctx, "Start flattenBindingsInLBFrontend")
tempSlice := make([]types.Object, 0, len(bindings))
for _, binding := range bindings {
s := models.ItemBindingModel{
Address: types.StringValue(binding.Address),
GUID: types.StringValue(binding.GUID),
Name: types.StringValue(binding.Name),
Port: types.Int64Value(int64(binding.Port)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemBindings, s)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenBindingsInLBFrontend struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemBindings}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenBindingsInLBFrontend", diags))
}
tflog.Info(ctx, "End flattenBindingsInLBFrontend")
return res
}

@ -0,0 +1,93 @@
package flattens
import (
"context"
"encoding/json"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
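// LBListDataSource flattens the lb list response into the data source state: the filter fields are preserved from the current state, a new UUID is written to the id attribute, and each lb item is converted with the shared flatten helpers.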
func LBListDataSource(ctx context.Context, state *models.DataSourceLBList, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBListDataSource")
diags := diag.Diagnostics{}
lbList, err := utilities.LBListDataSourceCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about list lb", err.Error())
return diags
}
tflog.Info(ctx, "flattens.LBListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceLBList{
ByID: state.ByID,
Name: state.Name,
AccountID: state.AccountID,
RgID: state.RgID,
TechStatus: state.TechStatus,
Status: state.Status,
FrontIP: state.FrontIP,
BackIP: state.BackIP,
IncludeDeleted: state.IncludeDeleted,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
}
items := make([]models.ItemsLBListModel, 0, len(lbList.Data))
for _, lbItem := range lbList.Data {
acl, _ := json.Marshal(lbItem.ACL)
item := models.ItemsLBListModel{
HAMode: types.BoolValue(lbItem.HAMode),
ACL: types.StringValue(string(acl)),
BackendHAIP: types.StringValue(lbItem.BackendHAIP),
Backends: flattenBackendsInLB(ctx, lbItem.Backends),
CreatedBy: types.StringValue(lbItem.CreatedBy),
CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)),
DeletedBy: types.StringValue(lbItem.DeletedBy),
DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)),
Description: types.StringValue(lbItem.Description),
DPAPIUser: types.StringValue(lbItem.DPAPIUser),
DPAPIPassword: types.StringValue(lbItem.DPAPIPassword),
ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)),
FrontendHAIP: types.StringValue(lbItem.FrontendHAIP),
Frontends: flattenFrontendsInLB(ctx, lbItem.Frontends),
GID: types.Int64Value(int64(lbItem.GID)),
GUID: types.Int64Value(int64(lbItem.GUID)),
LBID: types.Int64Value(int64(lbItem.ID)),
Milestones: types.Int64Value(int64(lbItem.Milestones)),
Name: types.StringValue(lbItem.Name),
PrimaryNode: flattenNodeInLB(ctx, lbItem.PrimaryNode),
RGID: types.Int64Value(int64(lbItem.RGID)),
RGName: types.StringValue(lbItem.RGName),
SecondaryNode: flattenNodeInLB(ctx, lbItem.SecondaryNode),
Status: types.StringValue(lbItem.Status),
TechStatus: types.StringValue(lbItem.TechStatus),
UpdatedBy: types.StringValue(lbItem.UpdatedBy),
UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)),
VINSID: types.Int64Value(int64(lbItem.VINSID)),
}
items = append(items, item)
}
state.Items = items
state.EntryCount = types.Int64Value(int64(lbList.EntryCount))
tflog.Info(ctx, "flattens.LBListDataSource: after flatten")
tflog.Info(ctx, "End flattens.LBListDataSource")
return nil
}

@ -0,0 +1,91 @@
package flattens
import (
"context"
"encoding/json"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
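// LBListDeletedDataSource flattens the deleted lb list response into the data source state: the filter fields are preserved from the current state, a new UUID is written to the id attribute, and each lb item is converted with the shared flatten helpers.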
func LBListDeletedDataSource(ctx context.Context, state *models.DataSourceLBListDeleted, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBListDeletedDataSource")
diags := diag.Diagnostics{}
lbList, err := utilities.LBListDeletedDataSourceCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about list deleted", err.Error())
return diags
}
tflog.Info(ctx, "flattens.LBListDeletedDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceLBListDeleted{
ByID: state.ByID,
Name: state.Name,
AccountID: state.AccountID,
RgID: state.RgID,
TechStatus: state.TechStatus,
FrontIP: state.FrontIP,
BackIP: state.BackIP,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
}
items := make([]models.ItemsLBListDeletedModel, 0, len(lbList.Data))
for _, lbItem := range lbList.Data {
acl, _ := json.Marshal(lbItem.ACL)
item := models.ItemsLBListDeletedModel{
HAMode: types.BoolValue(lbItem.HAMode),
ACL: types.StringValue(string(acl)),
BackendHAIP: types.StringValue(lbItem.BackendHAIP),
Backends: flattenBackendsInLB(ctx, lbItem.Backends),
CreatedBy: types.StringValue(lbItem.CreatedBy),
CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)),
DeletedBy: types.StringValue(lbItem.DeletedBy),
DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)),
Description: types.StringValue(lbItem.Description),
DPAPIUser: types.StringValue(lbItem.DPAPIUser),
DPAPIPassword: types.StringValue(lbItem.DPAPIPassword),
ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)),
FrontendHAIP: types.StringValue(lbItem.FrontendHAIP),
Frontends: flattenFrontendsInLB(ctx, lbItem.Frontends),
GID: types.Int64Value(int64(lbItem.GID)),
GUID: types.Int64Value(int64(lbItem.GUID)),
LBID: types.Int64Value(int64(lbItem.ID)),
Milestones: types.Int64Value(int64(lbItem.Milestones)),
Name: types.StringValue(lbItem.Name),
PrimaryNode: flattenNodeInLB(ctx, lbItem.PrimaryNode),
RGID: types.Int64Value(int64(lbItem.RGID)),
RGName: types.StringValue(lbItem.RGName),
SecondaryNode: flattenNodeInLB(ctx, lbItem.SecondaryNode),
Status: types.StringValue(lbItem.Status),
TechStatus: types.StringValue(lbItem.TechStatus),
UpdatedBy: types.StringValue(lbItem.UpdatedBy),
UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)),
VINSID: types.Int64Value(int64(lbItem.VINSID)),
}
items = append(items, item)
}
state.Items = items
state.EntryCount = types.Int64Value(int64(lbList.EntryCount))
tflog.Info(ctx, "flattens.LBListDeletedDataSource: after flatten")
tflog.Info(ctx, "End flattens.LBListDeletedDataSource")
return nil
}

@ -0,0 +1,72 @@
package flattens
import (
"context"
"encoding/json"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
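// LBResource reads the lb from the API and refreshes the computed fields of the resource plan, keeping the user-set fields (rg_id, name, extnet_id, vins_id, start, timeouts) and the action flags as-is.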
func LBResource(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBResource")
diags := diag.Diagnostics{}
recordItemLB, diags := utilities.LBResourceCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
acl, _ := json.Marshal(recordItemLB.ACL)
*plan = models.ResourceLBModel{
RGID: plan.RGID,
Name: plan.Name,
ExtNetID: plan.ExtNetID,
VINSID: plan.VINSID,
Start: plan.Start,
ID: plan.ID,
HAMode: types.BoolValue(recordItemLB.HAMode),
Safe: plan.Safe,
Timeouts: plan.Timeouts,
SysctlParams: plan.SysctlParams,
Permanently: plan.Permanently,
Restart: plan.Restart,
Enable: plan.Enable,
ConfigReset: plan.ConfigReset,
ACL: types.StringValue(string(acl)),
BackendHAIP: types.StringValue(recordItemLB.BackendHAIP),
Backends: flattenBackendsInLB(ctx, recordItemLB.Backends),
CKey: types.StringValue(recordItemLB.CKey),
Description: types.StringValue(recordItemLB.Description),
DPAPIUser: types.StringValue(recordItemLB.DPAPIUser),
DPAPIPassword: types.StringValue(recordItemLB.DPAPIPassword),
FrontendHAIP: types.StringValue(recordItemLB.FrontendHAIP),
Frontends: flattenFrontendsInLB(ctx, recordItemLB.Frontends),
GID: types.Int64Value(int64(recordItemLB.GID)),
GUID: types.Int64Value(int64(recordItemLB.GUID)),
ImageID: types.Int64Value(int64(recordItemLB.ImageID)),
LBID: types.Int64Value(int64(recordItemLB.ID)),
Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordItemLB.Meta),
Milestones: types.Int64Value(int64(recordItemLB.Milestones)),
ManagerId: types.Int64Value(int64(recordItemLB.ManagerId)),
ManagerType: types.StringValue(recordItemLB.ManagerType),
PartK8s: types.BoolValue(recordItemLB.PartK8s),
PrimaryNode: flattenNodeInLB(ctx, recordItemLB.PrimaryNode),
SecondaryNode: flattenNodeInLB(ctx, recordItemLB.SecondaryNode),
Status: types.StringValue(recordItemLB.Status),
TechStatus: types.StringValue(recordItemLB.TechStatus),
UserManaged: types.BoolValue(recordItemLB.UserManaged),
}
tflog.Info(ctx, "End flattens.LBResource", map[string]any{"id": plan.ID.ValueString()})
return nil
}

@ -0,0 +1,44 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
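// LBBackendResource reads the backend from the API and refreshes the computed fields of the resource plan; the check settings (inter, downinter, rise, fall, slowstart, maxconn, maxqueue, weight) come from ServerDefaultSettings.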
func LBBackendResource(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBBackendResource")
diags := diag.Diagnostics{}
recordItemBackend, diags := utilities.LBBackendResourceCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
*plan = models.ResourceLBBackendModel{
LBID: plan.LBID,
Name: plan.Name,
ID: plan.ID,
Timeouts: plan.Timeouts,
GUID: types.StringValue(recordItemBackend.GUID),
Algorithm: types.StringValue(recordItemBackend.Algorithm),
DownInter: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.DownInter)),
Fall: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Fall)),
Inter: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Inter)),
MaxConn: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.MaxConn)),
MaxQueue: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.MaxQueue)),
Rise: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Rise)),
SlowStart: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.SlowStart)),
Weight: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Weight)),
Servers: flattenServersInLB(ctx, recordItemBackend.Servers),
}
tflog.Info(ctx, "End flattens.LBBackendResource", map[string]any{"name": plan.Name.ValueString()})
return nil
}

@ -0,0 +1,45 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
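// LBBackendServerResource reads the backend server from the API and refreshes the computed fields of the resource plan; the check settings come from ServerSettings.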
func LBBackendServerResource(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBBackendServerResource")
diags := diag.Diagnostics{}
recordItemServer, diags := utilities.LBBackendServerResourceCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
*plan = models.ResourceLBBackendServerModel{
LBID: plan.LBID,
Backend: plan.Backend,
Name: plan.Name,
Address: plan.Address,
Port: plan.Port,
ID: plan.ID,
Timeouts: plan.Timeouts,
Check: types.StringValue(recordItemServer.Check),
Inter: types.Int64Value(int64(recordItemServer.ServerSettings.Inter)),
DownInter: types.Int64Value(int64(recordItemServer.ServerSettings.DownInter)),
Rise: types.Int64Value(int64(recordItemServer.ServerSettings.Rise)),
Fall: types.Int64Value(int64(recordItemServer.ServerSettings.Fall)),
SlowStart: types.Int64Value(int64(recordItemServer.ServerSettings.SlowStart)),
MaxConn: types.Int64Value(int64(recordItemServer.ServerSettings.MaxConn)),
MaxQueue: types.Int64Value(int64(recordItemServer.ServerSettings.MaxQueue)),
Weight: types.Int64Value(int64(recordItemServer.ServerSettings.Weight)),
}
tflog.Info(ctx, "End flattens.LBBackendServerResource", map[string]any{"name": plan.Name.ValueString()})
return nil
}

@ -0,0 +1,36 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
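// LBFrontendResource reads the frontend from the API and refreshes the computed guid and bindings fields of the resource plan.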
func LBFrontendResource(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBFrontendResource")
diags := diag.Diagnostics{}
recordItemFrontend, diags := utilities.LBFrontendResourceCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
*plan = models.ResourceLBFrontendModel{
LBID: plan.LBID,
Name: plan.Name,
Backend: plan.Backend,
ID: plan.ID,
Timeouts: plan.Timeouts,
GUID: types.StringValue(recordItemFrontend.GUID),
Bindings: flattenBindingsInLB(ctx, recordItemFrontend.Bindings),
}
tflog.Info(ctx, "End flattens.LBFrontendResource", map[string]any{"name": plan.Name.ValueString()})
return nil
}

@ -0,0 +1,37 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities"
)
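// LBFrontendBindResource reads the frontend binding from the API and refreshes the computed guid field of the resource plan.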
func LBFrontendBindResource(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.LBFrontendBindResource")
diags := diag.Diagnostics{}
recordItemFrontendBind, diags := utilities.LBFrontendBindResourceCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
*plan = models.ResourceLBFrontendBindModel{
Address: plan.Address,
Frontend: plan.Frontend,
LBID: plan.LBID,
Name: plan.Name,
ID: plan.ID,
Timeouts: plan.Timeouts,
GUID: types.StringValue(recordItemFrontendBind.GUID),
Port: plan.Port,
}
tflog.Info(ctx, "End flattens.LBFrontendBindResource", map[string]any{"name": plan.Name.ValueString()})
return nil
}

@ -0,0 +1,125 @@
package lb
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models"
)
// resourceLBInputChecks checks if rg_id, extnet_id and vins_id are valid.
func resourceLBInputChecks(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
extNetId := uint64(plan.ExtNetID.ValueInt64())
vinsId := uint64(plan.VINSID.ValueInt64())
if extNetId == 0 && vinsId == 0 {
diags.AddError(fmt.Sprintf("Unable to validate vins_id and extnet_id"), "vins_id and ext_net_id cannot be both in the value 0")
return diags
}
rgID := uint64(plan.RGID.ValueInt64())
tflog.Info(ctx, "resourceLBInputChecks: exist resource rg", map[string]any{"rg_id": rgID})
err := ic.ExistRG(ctx, rgID, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about rg with ID %v", rgID), err.Error())
}
tflog.Info(ctx, "resourceLBInputChecks: exist resource extNet", map[string]any{" extnet_id": extNetId})
err = ic.ExistExtNetInLb(ctx, extNetId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about extNet with ID %v", extNetId), err.Error())
}
tflog.Info(ctx, "resourceLBInputChecks: exist resource VINS", map[string]any{" vins_id": vinsId})
err = ic.ExistVinsInLb(ctx, vinsId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about VINS with ID %v", vinsId), err.Error())
}
return diags
}
// resourceLBFrontendBindInputChecks checks if lb_id and frontend_name are valid.
func resourceLBFrontendBindInputChecks(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
lbID := uint64(plan.LBID.ValueInt64())
fName := plan.Frontend.ValueString()
tflog.Info(ctx, "resourceLBFrontendBindInputChecks: exist resource lb", map[string]any{"lb_id": lbID})
err := ic.ExistLB(ctx, lbID, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error())
return diags
}
tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lbFrontend", map[string]any{"name": fName})
err = ic.ExistLBFrontend(ctx, lbID, fName, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about frontend with name %v", fName), err.Error())
return diags
}
return diags
}
// resourceLBFrontendInputChecks checks if lb_id and backend_name are valid.
func resourceLBFrontendInputChecks(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
lbID := uint64(plan.LBID.ValueInt64())
bName := plan.Backend.ValueString()
tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lb", map[string]any{"lb_id": lbID})
err := ic.ExistLB(ctx, lbID, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error())
return diags
}
tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lbBackend", map[string]any{"name": bName})
err = ic.ExistLBBackend(ctx, lbID, bName, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about backend with name %v", bName), err.Error())
return diags
}
return diags
}
// resourceLBBackendServerInputChecks checks if lb_id and backend_name are valid.
func resourceLBBackendServerInputChecks(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
lbID := uint64(plan.LBID.ValueInt64())
bName := plan.Backend.ValueString()
tflog.Info(ctx, "resourceLBBackendServerInputChecks: exist resource lb", map[string]any{"lb_id": lbID})
err := ic.ExistLB(ctx, lbID, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error())
} else {
tflog.Info(ctx, "resourceLBBackendServerInputChecks: exist resource lbBackend", map[string]any{"name": bName})
err = ic.ExistLBBackend(ctx, lbID, bName, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about backend with name %v", bName), err.Error())
}
}
return diags
}
// resourceLBBackendInputChecks checks if lb_id is valid.
func resourceLBBackendInputChecks(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
lbID := uint64(plan.LBID.ValueInt64())
tflog.Info(ctx, "resourceLBBackendInputChecks: exist resource lb", map[string]any{"lb_id": lbID})
err := ic.ExistLB(ctx, lbID, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error())
}
return diags
}

@ -0,0 +1,147 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceLB struct {
// required fields
Timeouts timeouts.Value `tfsdk:"timeouts"`
LBID types.Int64 `tfsdk:"lb_id"`
// response fields
HAMode types.Bool `tfsdk:"ha_mode"`
ACL types.String `tfsdk:"acl"`
BackendHAIP types.String `tfsdk:"backend_haip"`
Backends types.List `tfsdk:"backends"`
CKey types.String `tfsdk:"ckey"`
Description types.String `tfsdk:"desc"`
DPAPIUser types.String `tfsdk:"dp_api_user"`
DPAPIPassword types.String `tfsdk:"dp_api_password"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
FrontendHAIP types.String `tfsdk:"frontend_haip"`
Frontends types.List `tfsdk:"frontends"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.Int64 `tfsdk:"id"`
ImageID types.Int64 `tfsdk:"image_id"`
ManagerId types.Int64 `tfsdk:"manager_id"`
ManagerType types.String `tfsdk:"manager_type"`
Meta types.List `tfsdk:"meta"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
PartK8s types.Bool `tfsdk:"part_k8s"`
PrimaryNode types.Object `tfsdk:"primary_node"`
RGID types.Int64 `tfsdk:"rg_id"`
SecondaryNode types.Object `tfsdk:"secondary_node"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UserManaged types.Bool `tfsdk:"user_managed"`
VINSID types.Int64 `tfsdk:"vins_id"`
}
type ItemBackendModel struct {
Algorithm types.String `tfsdk:"algorithm"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
ServerDefaultSettings types.Object `tfsdk:"server_default_settings"`
Servers types.List `tfsdk:"servers"`
}
type ItemFrontendModel struct {
Backend types.String `tfsdk:"backend"`
Bindings types.List `tfsdk:"bindings"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
}
type RecordNodeModel struct {
BackendIP types.String `tfsdk:"backend_ip"`
ComputeID types.Int64 `tfsdk:"compute_id"`
FrontendIP types.String `tfsdk:"frontend_ip"`
GUID types.String `tfsdk:"guid"`
MGMTIP types.String `tfsdk:"mgmt_ip"`
NetworkID types.Int64 `tfsdk:"network_id"`
}
type RecordServerSettingsModel struct {
Inter types.Int64 `tfsdk:"inter"`
GUID types.String `tfsdk:"guid"`
DownInter types.Int64 `tfsdk:"downinter"`
Rise types.Int64 `tfsdk:"rise"`
Fall types.Int64 `tfsdk:"fall"`
SlowStart types.Int64 `tfsdk:"slowstart"`
MaxConn types.Int64 `tfsdk:"maxconn"`
MaxQueue types.Int64 `tfsdk:"maxqueue"`
Weight types.Int64 `tfsdk:"weight"`
}
type RecordServerModel struct {
Address types.String `tfsdk:"address"`
Check types.String `tfsdk:"check"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
Port types.Int64 `tfsdk:"port"`
ServerSettings types.Object `tfsdk:"server_settings"`
}
type ItemBindingModel struct {
Address types.String `tfsdk:"address"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
Port types.Int64 `tfsdk:"port"`
}
var ItemNode = map[string]attr.Type{
"backend_ip": types.StringType,
"compute_id": types.Int64Type,
"frontend_ip": types.StringType,
"guid": types.StringType,
"mgmt_ip": types.StringType,
"network_id": types.Int64Type,
}
var ItemBackend = map[string]attr.Type{
"algorithm": types.StringType,
"guid": types.StringType,
"name": types.StringType,
"server_default_settings": types.ObjectType{AttrTypes: ItemServerSettings},
"servers": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemServers}},
}
var ItemFrontend = map[string]attr.Type{
"backend": types.StringType,
"bindings": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemBindings}},
"guid": types.StringType,
"name": types.StringType,
}
var ItemServers = map[string]attr.Type{
"address": types.StringType,
"check": types.StringType,
"guid": types.StringType,
"name": types.StringType,
"port": types.Int64Type,
"server_settings": types.ObjectType{AttrTypes: ItemServerSettings},
}
var ItemServerSettings = map[string]attr.Type{
"inter": types.Int64Type,
"guid": types.StringType,
"downinter": types.Int64Type,
"rise": types.Int64Type,
"fall": types.Int64Type,
"slowstart": types.Int64Type,
"maxconn": types.Int64Type,
"maxqueue": types.Int64Type,
"weight": types.Int64Type,
}
var ItemBindings = map[string]attr.Type{
"address": types.StringType,
"name": types.StringType,
"port": types.Int64Type,
"guid": types.StringType,
}

@ -0,0 +1,64 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceLBList struct {
// optional fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountID types.Int64 `tfsdk:"account_id"`
RgID types.Int64 `tfsdk:"rg_id"`
TechStatus types.String `tfsdk:"tech_status"`
Status types.String `tfsdk:"status"`
FrontIP types.String `tfsdk:"front_ip"`
BackIP types.String `tfsdk:"back_ip"`
IncludeDeleted types.Bool `tfsdk:"include_deleted"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemsLBListModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemsLBListModel struct {
HAMode types.Bool `tfsdk:"ha_mode"`
ACL types.String `tfsdk:"acl"`
BackendHAIP types.String `tfsdk:"backend_haip"`
Backends types.List `tfsdk:"backends"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DPAPIUser types.String `tfsdk:"dp_api_user"`
DPAPIPassword types.String `tfsdk:"dp_api_password"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
FrontendHAIP types.String `tfsdk:"frontend_haip"`
Frontends types.List `tfsdk:"frontends"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
LBID types.Int64 `tfsdk:"lb_id"`
ManagerId types.Int64 `tfsdk:"manager_id"`
ManagerType types.String `tfsdk:"manager_type"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
PartK8s types.Bool `tfsdk:"part_k8s"`
PrimaryNode types.Object `tfsdk:"primary_node"`
RGName types.String `tfsdk:"rg_name"`
RGID types.Int64 `tfsdk:"rg_id"`
SecondaryNode types.Object `tfsdk:"secondary_node"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
UserManaged types.Bool `tfsdk:"user_managed"`
VINSID types.Int64 `tfsdk:"vins_id"`
}

@ -0,0 +1,62 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceLBListDeleted struct {
// optional fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountID types.Int64 `tfsdk:"account_id"`
RgID types.Int64 `tfsdk:"rg_id"`
TechStatus types.String `tfsdk:"tech_status"`
FrontIP types.String `tfsdk:"front_ip"`
BackIP types.String `tfsdk:"back_ip"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemsLBListDeletedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemsLBListDeletedModel struct {
HAMode types.Bool `tfsdk:"ha_mode"`
ACL types.String `tfsdk:"acl"`
BackendHAIP types.String `tfsdk:"backend_haip"`
Backends types.List `tfsdk:"backends"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DPAPIUser types.String `tfsdk:"dp_api_user"`
DPAPIPassword types.String `tfsdk:"dp_api_password"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
FrontendHAIP types.String `tfsdk:"frontend_haip"`
Frontends types.List `tfsdk:"frontends"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
LBID types.Int64 `tfsdk:"lb_id"`
ManagerId types.Int64 `tfsdk:"manager_id"`
ManagerType types.String `tfsdk:"manager_type"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
PartK8s types.Bool `tfsdk:"part_k8s"`
PrimaryNode types.Object `tfsdk:"primary_node"`
RGName types.String `tfsdk:"rg_name"`
RGID types.Int64 `tfsdk:"rg_id"`
SecondaryNode types.Object `tfsdk:"secondary_node"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
UserManaged types.Bool `tfsdk:"user_managed"`
VINSID types.Int64 `tfsdk:"vins_id"`
}

@ -0,0 +1,52 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceLBModel struct {
// required fields
RGID types.Int64 `tfsdk:"rg_id"`
Name types.String `tfsdk:"name"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
VINSID types.Int64 `tfsdk:"vins_id"`
Start types.Bool `tfsdk:"start"`
// optional fields
HAMode types.Bool `tfsdk:"ha_mode"`
ACL types.String `tfsdk:"acl"`
Description types.String `tfsdk:"desc"`
Enable types.Bool `tfsdk:"enable"`
Restart types.Bool `tfsdk:"restart"`
ConfigReset types.Bool `tfsdk:"config_reset"`
Permanently types.Bool `tfsdk:"permanently"`
Restore types.Bool `tfsdk:"restore"`
Safe types.Bool `tfsdk:"safe"`
SysctlParams types.List `tfsdk:"sysctl_params"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
BackendHAIP types.String `tfsdk:"backend_haip"`
Backends types.List `tfsdk:"backends"`
CKey types.String `tfsdk:"ckey"`
DPAPIUser types.String `tfsdk:"dp_api_user"`
DPAPIPassword types.String `tfsdk:"dp_api_password"`
FrontendHAIP types.String `tfsdk:"frontend_haip"`
Frontends types.List `tfsdk:"frontends"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.String `tfsdk:"id"`
ImageID types.Int64 `tfsdk:"image_id"`
LBID types.Int64 `tfsdk:"lb_id"`
ManagerId types.Int64 `tfsdk:"manager_id"`
ManagerType types.String `tfsdk:"manager_type"`
Meta types.List `tfsdk:"meta"`
Milestones types.Int64 `tfsdk:"milestones"`
PartK8s types.Bool `tfsdk:"part_k8s"`
PrimaryNode types.Object `tfsdk:"primary_node"`
SecondaryNode types.Object `tfsdk:"secondary_node"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UserManaged types.Bool `tfsdk:"user_managed"`
}

@ -0,0 +1,24 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceLBBackendModel struct {
Algorithm types.String `tfsdk:"algorithm"`
LBID types.Int64 `tfsdk:"lb_id"`
Name types.String `tfsdk:"name"`
ID types.String `tfsdk:"id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
GUID types.String `tfsdk:"guid"`
Inter types.Int64 `tfsdk:"inter"`
DownInter types.Int64 `tfsdk:"downinter"`
Rise types.Int64 `tfsdk:"rise"`
Fall types.Int64 `tfsdk:"fall"`
SlowStart types.Int64 `tfsdk:"slowstart"`
MaxConn types.Int64 `tfsdk:"maxconn"`
MaxQueue types.Int64 `tfsdk:"maxqueue"`
Weight types.Int64 `tfsdk:"weight"`
Servers types.List `tfsdk:"servers"`
}

Some files were not shown because too many files have changed in this diff.
