1.0.1
internal/service/cloudbroker/rg/data_source_rg.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRG{}
)

func NewDataSourceRG() datasource.DataSource {
	return &dataSourceRG{}
}

// dataSourceRG is the data source implementation.
type dataSourceRG struct {
	client *decort.DecortClient
}

func (d *dataSourceRG) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error get state")
		return
	}
	rgId := uint64(state.RGID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceRG: got state successfully", map[string]any{"rg_id": rgId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRG: set timeouts successfully", map[string]any{
		"rg_id":       rgId,
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error flatten data source rg")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error set state")
		return
	}
	tflog.Info(ctx, "End read resource group", map[string]any{"rg_id": rgId})
}

func (d *dataSourceRG) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRG(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRG) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRG) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRG")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRG successfully")
}
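For context, a minimal sketch of the shape models.DataSourceRGModel presumably has so that state.RGID.ValueInt64 and state.Timeouts.Read work as used above. The actual definition lives in the models package of this commit; the tfsdk attribute names and field subset shown here are assumptions for illustration, not taken from the source.

package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// DataSourceRGModel (sketch): only a subset of fields is shown and the
// tfsdk tags are assumed.
type DataSourceRGModel struct {
	// request fields
	RGID   types.Int64  `tfsdk:"rg_id"`
	Reason types.String `tfsdk:"reason"`

	// "timeouts" block decoded by terraform-plugin-framework-timeouts
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// computed fields filled in by flattens.RGDataSource
	Id          types.String `tfsdk:"id"`
	AccountID   types.Int64  `tfsdk:"account_id"`
	AccountName types.String `tfsdk:"account_name"`
	// ...remaining computed attributes omitted
}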
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupComputes{}
)

func NewDataSourceRGAffinityGroupComputes() datasource.DataSource {
	return &dataSourceRGAffinityGroupComputes{}
}

// dataSourceRGAffinityGroupComputes is the data source implementation.
type dataSourceRGAffinityGroupComputes struct {
	client *decort.DecortClient
}

func (d *dataSourceRGAffinityGroupComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGAffinityGroupComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGAffinityGroupComputesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error flatten data source rg affinity group computes")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupComputes")
}

func (d *dataSourceRGAffinityGroupComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupComputes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGAffinityGroupComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_group_computes"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupComputes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupComputes successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupsGet{}
)

func NewDataSourceRGAffinityGroupsGet() datasource.DataSource {
	return &dataSourceRGAffinityGroupsGet{}
}

// dataSourceRGAffinityGroupsGet is the data source implementation.
type dataSourceRGAffinityGroupsGet struct {
	client *decort.DecortClient
}

func (d *dataSourceRGAffinityGroupsGet) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGAffinityGroupsGetModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGAffinityGroupsGetDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error flatten data source rg affinity group get")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupsGet")
}

func (d *dataSourceRGAffinityGroupsGet) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsGet(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGAffinityGroupsGet) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_groups_get"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupsGet) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsGet")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsGet successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupsList{}
)

func NewDataSourceRGAffinityGroupsList() datasource.DataSource {
	return &dataSourceRGAffinityGroupsList{}
}

// dataSourceRGAffinityGroupsList is the data source implementation.
type dataSourceRGAffinityGroupsList struct {
	client *decort.DecortClient
}

func (d *dataSourceRGAffinityGroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGAffinityGroupsListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGAffinityGroupsListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error flatten data source rg affinity groups list")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupsList")
}

func (d *dataSourceRGAffinityGroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGAffinityGroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_groups_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsList successfully")
}
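Each of these data sources obtains its *decort.DecortClient through client.DataSource in Configure. Below is a minimal sketch of what a helper with that signature typically does in the terraform-plugin-framework Configure flow, assuming the provider places a *decort.DecortClient into ProviderData; this is an illustrative assumption, not the actual internal/client implementation.

package client

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/datasource"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
)

// dataSourceClient (sketch) extracts the provider-configured SDK client from
// the Configure request, reporting a diagnostic on a type mismatch.
func dataSourceClient(_ context.Context, req *datasource.ConfigureRequest, resp *datasource.ConfigureResponse) *decort.DecortClient {
	// ProviderData is nil while Terraform is still validating the configuration.
	if req.ProviderData == nil {
		return nil
	}
	c, ok := req.ProviderData.(*decort.DecortClient)
	if !ok {
		resp.Diagnostics.AddError(
			"Unexpected Data Source Configure Type",
			fmt.Sprintf("Expected *decort.DecortClient, got: %T", req.ProviderData),
		)
		return nil
	}
	return c
}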
internal/service/cloudbroker/rg/data_source_rg_audits.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAudits{}
)

func NewDataSourceRGAudits() datasource.DataSource {
	return &dataSourceRGAudits{}
}

// dataSourceRGAudits is the data source implementation.
type dataSourceRGAudits struct {
	client *decort.DecortClient
}

func (d *dataSourceRGAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGAuditsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAudits: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAudits: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGAuditsDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error flatten data source rg audits")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAudits")
}

func (d *dataSourceRGAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAudits(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_audits"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAudits")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAudits successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGGetResourceConsumption{}
)

func NewDataSourceRGGetResourceConsumption() datasource.DataSource {
	return &dataSourceRGGetResourceConsumption{}
}

// dataSourceRGGetResourceConsumption is the data source implementation.
type dataSourceRGGetResourceConsumption struct {
	client *decort.DecortClient
}

func (d *dataSourceRGGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGGetResourceConsumptionModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGGetResourceConsumptionDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error flatten data source rg get resource consumption")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGGetResourceConsumption")
}

func (d *dataSourceRGGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGGetResourceConsumption(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_resource_consumption_get"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGGetResourceConsumption")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGGetResourceConsumption successfully")
}
internal/service/cloudbroker/rg/data_source_rg_list.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGList{}
)

func NewDataSourceRGList() datasource.DataSource {
	return &dataSourceRGList{}
}

// dataSourceRGList is the data source implementation.
type dataSourceRGList struct {
	client *decort.DecortClient
}

func (d *dataSourceRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGList: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error flatten data source rg list")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGList")
}

func (d *dataSourceRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGList successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListComputes{}
)

func NewDataSourceRGListComputes() datasource.DataSource {
	return &dataSourceRGListComputes{}
}

// dataSourceRGListComputes is the data source implementation.
type dataSourceRGListComputes struct {
	client *decort.DecortClient
}

func (d *dataSourceRGListComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListComputes: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListComputes: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListComputesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error flatten data source rg list computes")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListComputes")
}

func (d *dataSourceRGListComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListComputes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGListComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list_computes"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListComputes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListComputes successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListDeleted{}
)

func NewDataSourceRGListDeleted() datasource.DataSource {
	return &dataSourceRGListDeleted{}
}

// dataSourceRGListDeleted is the data source implementation.
type dataSourceRGListDeleted struct {
	client *decort.DecortClient
}

func (d *dataSourceRGListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListDeletedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListDeleted: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListDeleted: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListDeleted: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListDeletedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListDeleted: Error flatten data source rg list deleted")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListDeleted")
}

func (d *dataSourceRGListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListDeleted(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list_deleted"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListDeleted")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListDeleted successfully")
}
internal/service/cloudbroker/rg/data_source_rg_list_lb.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListLB{}
)

func NewDataSourceRGListLB() datasource.DataSource {
	return &dataSourceRGListLB{}
}

// dataSourceRGListLB is the data source implementation.
type dataSourceRGListLB struct {
	client *decort.DecortClient
}

func (d *dataSourceRGListLB) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListLBModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListLB: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListLB: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListLB: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListLB: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListLBDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListLB: Error flatten data source rg list lb")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListLB: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListLB")
}

func (d *dataSourceRGListLB) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListLB(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGListLB) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list_lb"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListLB) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListLB")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListLB successfully")
}
internal/service/cloudbroker/rg/data_source_rg_list_pfw.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListPFW{}
)

func NewDataSourceRGListPFW() datasource.DataSource {
	return &dataSourceRGListPFW{}
}

// dataSourceRGListPFW is the data source implementation.
type dataSourceRGListPFW struct {
	client *decort.DecortClient
}

func (d *dataSourceRGListPFW) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListPFWModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListPFW: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListPFW: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListPFW: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListPFW: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListPFWDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListPFW: Error flatten data source rg list pfw")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListPFW: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListPFW")
}

func (d *dataSourceRGListPFW) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListPFW(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGListPFW) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list_pfw"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListPFW) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListPFW")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListPFW successfully")
}
internal/service/cloudbroker/rg/data_source_rg_list_vins.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListVins{}
)

func NewDataSourceRGListVins() datasource.DataSource {
	return &dataSourceRGListVins{}
}

// dataSourceRGListVins is the data source implementation.
type dataSourceRGListVins struct {
	client *decort.DecortClient
}

func (d *dataSourceRGListVins) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGListVinsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListVins: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListVins: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListVins: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListVins: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGListVinsDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListVins: Error flatten data source rg list vins")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListVins: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListVins")
}

func (d *dataSourceRGListVins) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListVins(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGListVins) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_list_vins"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListVins) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListVins")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListVins successfully")
}
@@ -0,0 +1,89 @@
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGResourceConsumptionList{}
)

func NewDataSourceRGResourceConsumptionList() datasource.DataSource {
	return &dataSourceRGResourceConsumptionList{}
}

// dataSourceRGResourceConsumptionList is the data source implementation.
type dataSourceRGResourceConsumptionList struct {
	client *decort.DecortClient
}

func (d *dataSourceRGResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceRGResourceConsumptionListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.RGResourceConsumptionListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error flatten data source rg resource consumption list")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGResourceConsumptionList")
}

func (d *dataSourceRGResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGResourceConsumptionList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceRGResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_rg_resource_consumption_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGResourceConsumptionList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGResourceConsumptionList successfully")
}
internal/service/cloudbroker/rg/data_source_rg_usage.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGUsage{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGUsage() datasource.DataSource {
|
||||
return &dataSourceRGUsage{}
|
||||
}
|
||||
|
||||
// dataSourceRGUsage is the data source implementation.
|
||||
type dataSourceRGUsage struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGUsage) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGUsageModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGUsage: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGUsage: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGUsageDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error flatten data source rg usage")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGUsage")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGUsage) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGUsage(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
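// Assuming the provider is registered under the type name "dynamix" (suggested by the
// repository name, but set outside this file), the resulting data source would be
// referenced from configuration roughly as:
//
//	data "dynamix_cb_rg_usage" "example" {
//	  rg_id = 1234
//	}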
func (d *dataSourceRGUsage) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_cb_rg_usage"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGUsage) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Start Configure dataSourceRGUsage")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Configure dataSourceRGUsage completed successfully")
}
|
||||
@@ -0,0 +1,126 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGDataSource flattens data source for rg (resource group).
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgId := uint64(state.RGID.ValueInt64())
|
||||
|
||||
recordRG, err := utilities.RGCheckPresence(ctx, rgId, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgId), err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGDataSource: before flatten", map[string]any{"rg_id": rgId, "recordRG": recordRG})
|
||||
|
||||
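// The data source id is a freshly generated UUID on every read; the remaining
// response fields below are copied from the platform record fetched above.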
id := uuid.New()
|
||||
*state = models.DataSourceRGModel{
|
||||
RGID: state.RGID,
|
||||
Reason: state.Reason,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
AccountID: types.Int64Value(int64(recordRG.AccountID)),
|
||||
AccountName: types.StringValue(recordRG.AccountName),
|
||||
ACL: flattenACL(ctx, &recordRG.ACL),
|
||||
ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.ComputeFeatures),
|
||||
Computes: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VMs),
|
||||
CPUAllocationParameter: types.StringValue(recordRG.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(recordRG.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(recordRG.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(recordRG.CreatedTime)),
|
||||
DefNetID: types.Int64Value(recordRG.DefNetID),
|
||||
DefNetType: types.StringValue(recordRG.DefNetType),
|
||||
DeletedBy: types.StringValue(recordRG.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(recordRG.DeletedTime)),
|
||||
Description: types.StringValue(recordRG.Description),
|
||||
Dirty: types.BoolValue(recordRG.Dirty),
|
||||
GID: types.Int64Value(int64(recordRG.GID)),
|
||||
GUID: types.Int64Value(int64(recordRG.GUID)),
|
||||
LockStatus: types.StringValue(recordRG.LockStatus),
|
||||
Milestones: types.Int64Value(int64(recordRG.Milestones)),
|
||||
Name: types.StringValue(recordRG.Name),
|
||||
RegisterComputes: types.BoolValue(recordRG.RegisterComputes),
|
||||
ResourceLimits: flattenResourceLimits(ctx, &recordRG.ResourceLimits),
|
||||
ResourceTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.ResTypes),
|
||||
Secret: types.StringValue(recordRG.Secret),
|
||||
Status: types.StringValue(recordRG.Status),
|
||||
UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.UniqPools),
|
||||
UpdatedBy: types.StringValue(recordRG.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(recordRG.UpdatedTime)),
|
||||
VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VINS),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGDataSource: after flatten", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGDataSource", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenACL(ctx context.Context, item *rg.ListACL) types.List {
|
||||
tflog.Info(ctx, "Start flattenACLItems")
|
||||
tempSlice := make([]types.Object, 0, len(*item))
|
||||
for _, aclItem := range *item {
|
||||
temp := models.ItemACLModel{
|
||||
Explicit: types.BoolValue(aclItem.Explicit),
|
||||
GUID: types.StringValue(aclItem.GUID),
|
||||
Right: types.StringValue(aclItem.Right),
|
||||
Status: types.StringValue(aclItem.Status),
|
||||
Type: types.StringValue(aclItem.Type),
|
||||
UserGroupID: types.StringValue(aclItem.UserGroupID),
|
||||
}
|
||||
obj, diags := types.ObjectValueFrom(ctx, models.ItemACL, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenACLItems struct to obj", diags))
|
||||
}
|
||||
tempSlice = append(tempSlice, obj)
|
||||
}
|
||||
|
||||
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACL}, tempSlice)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenACLItems", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenACLItems")
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenResourceLimits(ctx context.Context, item *rg.ResourceLimits) types.Object {
|
||||
tflog.Info(ctx, "Start flattenResourceLimits")
|
||||
temp := models.ResourceLimitsModel{
|
||||
CUC: types.Float64Value(item.CUC),
|
||||
CUD: types.Float64Value(item.CuD),
|
||||
CUDM: types.Float64Value(item.CUDM),
|
||||
CUI: types.Float64Value(item.CUI),
|
||||
CUM: types.Float64Value(item.CUM),
|
||||
CUNP: types.Float64Value(item.CUNP),
|
||||
GPUUnits: types.Float64Value(item.GPUUnits),
|
||||
}
|
||||
res, diags := types.ObjectValueFrom(ctx, models.ItemResourseModel, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimits struct to obj", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenResourceLimits")
|
||||
return res
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupComputesDataSource flattens data source for rg affinity group computes.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupComputesDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupComputesDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agCompsList, err := utilities.RGAffinityGroupComputesCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity group computes", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupComputesModel{
|
||||
RGID: state.RGID,
|
||||
AffinityGroup: state.AffinityGroup,
|
||||
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
items := make([]models.ItemAffinityGroupComputeModel, 0, len(*agCompsList))
|
||||
for _, comp := range *agCompsList {
|
||||
item := models.ItemAffinityGroupComputeModel{
|
||||
ComputeID: types.Int64Value(int64(comp.ComputeID)),
|
||||
}
|
||||
|
||||
item.OtherNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNode)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNode to item.OtherNode", diagsItem))
}
item.OtherNodeIndirect, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirect)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirect to item.OtherNodeIndirect", diagsItem))
}
item.OtherNodeIndirectSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirectSoft)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirectSoft to item.OtherNodeIndirectSoft", diagsItem))
}
item.OtherNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeSoft)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeSoft to item.OtherNodeSoft", diagsItem))
}
item.SameNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNode)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNode to item.SameNode", diagsItem))
}
item.SameNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNodeSoft)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNodeSoft to item.SameNodeSoft", diagsItem))
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupComputesDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupsGetDataSource flattens data source for rg affinity groups get.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupsGetDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupsGetDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agItem, err := utilities.RGAffinityGroupsGetCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity groups get", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupsGetModel{
|
||||
RGID: state.RGID,
|
||||
AffinityGroup: state.AffinityGroup,
|
||||
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
state.Ids, diagsItem = types.ListValueFrom(ctx, types.Int64Type, agItem)
if diagsItem.HasError() {
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupsGetDataSource: cannot flatten agItem to state.Ids", diagsItem))
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupsGetDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupsListDataSource flattens data source for rg affinity groups list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupsListDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupsListDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agList, err := utilities.RGAffinityGroupsListCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity groups list", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupsListModel{
|
||||
RGID: state.RGID,
|
||||
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
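// Each entry of agList.Data maps an affinity group label to its member computes;
// every label becomes one ItemAffinityGroupModel with the member IDs collected below.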
items := make([]models.ItemAffinityGroupModel, 0, len(agList.Data))
|
||||
for _, data := range agList.Data {
|
||||
for agLabel, listAG := range data {
|
||||
item := models.ItemAffinityGroupModel{
|
||||
Label: types.StringValue(agLabel),
|
||||
}
|
||||
|
||||
ids := make([]models.ItemIDModel, 0, len(listAG))
|
||||
for _, agItem := range listAG {
|
||||
idItem := models.ItemIDModel{
|
||||
Id: types.Int64Value(int64(agItem.ID)),
|
||||
NodeId: types.Int64Value(int64(agItem.NodeID)),
|
||||
}
|
||||
ids = append(ids, idItem)
|
||||
}
|
||||
item.Ids = ids
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
state.AffinityGroups = items
|
||||
state.EntryCount = types.Int64Value(int64(agList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupsListDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAuditsDataSource flattens data source for rg audits.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAuditsDataSource(ctx context.Context, state *models.DataSourceRGAuditsModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAuditsDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgAudits, err := utilities.RGAuditsCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group audits", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAuditsDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAuditsModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGAuditModel, 0, len(*rgAudits))
|
||||
for _, auditItem := range *rgAudits {
|
||||
item := models.ItemsRGAuditModel{
|
||||
Call: types.StringValue(auditItem.Call),
|
||||
ResponseTime: types.Float64Value(auditItem.ResponseTime),
|
||||
StatusCode: types.Int64Value(int64(auditItem.StatusCode)),
|
||||
Timestamp: types.Float64Value(auditItem.Timestamp),
|
||||
User: types.StringValue(auditItem.User),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
state.Items = items
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAuditsDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAuditsDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGGetResourceConsumptionDataSource flattens data source for rg get resource consumption.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGGetResourceConsumptionDataSource(ctx context.Context, state *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGGetResourceConsumptionDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
resourceItem, err := utilities.RGGetResourceConsumptionCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group get resource consumption", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGGetResourceConsumptionModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
|
||||
Consumed: flattenResource(ctx, &resourceItem.Consumed),
|
||||
Reserved: flattenResource(ctx, &resourceItem.Reserved),
|
||||
ResourceLimits: flattenResourceLimits(ctx, &resourceItem.ResourceLimits),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGGetResourceConsumptionDataSource")
|
||||
return nil
|
||||
}
|
||||
|
||||
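// flattenResource converts a single rg.Reservation (used for both consumed and
// reserved capacity) into a ResourceModel object, including the per-SEP disk usage list.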
func flattenResource(ctx context.Context, item *rg.Reservation) types.Object {
|
||||
tflog.Info(ctx, "Start flattenResource")
|
||||
temp := models.ResourceModel{
|
||||
CPU: types.Int64Value(item.CPU),
|
||||
DiskSize: types.Float64Value(item.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(item.DiskSizeMax),
|
||||
ExtIPs: types.Int64Value(item.ExtIPs),
|
||||
ExtTraffic: types.Int64Value(item.ExtTraffic),
|
||||
GPU: types.Int64Value(item.GPU),
|
||||
RAM: types.Int64Value(item.RAM),
|
||||
SEPs: flattenSEPs(ctx, item.SEPs),
|
||||
}
|
||||
res, diags := types.ObjectValueFrom(ctx, models.ItemResource, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenResource struct to obj", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenResource")
|
||||
return res
|
||||
}
|
||||
|
||||
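// flattenSEPs converts the nested SEP usage map (SEP ID -> data name -> DiskUsage)
// into a flat list of objects, producing one list element per (SEP ID, data name) pair.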
func flattenSEPs(ctx context.Context, seps map[string]map[string]rg.DiskUsage) types.List {
|
||||
tflog.Info(ctx, "Start flattenSEPs")
|
||||
tempSlice := make([]types.Object, 0, len(seps))
|
||||
for sepId, data := range seps {
|
||||
for dataName, diskData := range data {
|
||||
sepItem := models.SEPsModel{
|
||||
SepID: types.StringValue(sepId),
|
||||
DataName: types.StringValue(dataName),
|
||||
DiskSize: types.Float64Value(diskData.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
|
||||
}
|
||||
obj, diags := types.ObjectValueFrom(ctx, models.ItemSEPs, sepItem)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("Error flattenSEPs struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
}

res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSEPs}, tempSlice)
if diags.HasError() {
tflog.Error(ctx, fmt.Sprint("Error flattenSEPs", diags))
}
|
||||
|
||||
tflog.Info(ctx, "End flattenSEPs")
|
||||
return res
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListDataSource flattens data source for rg list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListDataSource(ctx context.Context, state *models.DataSourceRGListModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgList, err := utilities.RGListCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListModel{
|
||||
ById: state.ById,
|
||||
Name: state.Name,
|
||||
AccountId: state.AccountId,
|
||||
AccountName: state.AccountName,
|
||||
CreatedAfter: state.CreatedAfter,
|
||||
CreatedBefore: state.CreatedBefore,
|
||||
Status: state.Status,
|
||||
LockStatus: state.LockStatus,
|
||||
IncludeDeleted: state.IncludeDeleted,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListModel, 0, len(rgList.Data))
|
||||
for _, rgItem := range rgList.Data {
|
||||
item := models.ItemsRGListModel{
|
||||
AccountACL: flattenACL(ctx, &rgItem.ACL),
|
||||
AccountID: types.Int64Value(int64(rgItem.AccountID)),
|
||||
AccountName: types.StringValue(rgItem.AccountName),
|
||||
ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ComputeFeatures),
|
||||
CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(rgItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)),
|
||||
DefNetID: types.Int64Value(rgItem.DefNetID),
|
||||
DefNetType: types.StringValue(rgItem.DefNetType),
|
||||
DeletedBy: types.StringValue(rgItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)),
|
||||
Description: types.StringValue(rgItem.Description),
|
||||
Dirty: types.BoolValue(rgItem.Dirty),
|
||||
GID: types.Int64Value(int64(rgItem.GID)),
|
||||
GUID: types.Int64Value(int64(rgItem.GUID)),
|
||||
RGID: types.Int64Value(int64(rgItem.ID)),
|
||||
LockStatus: types.StringValue(rgItem.LockStatus),
|
||||
Milestones: types.Int64Value(int64(rgItem.Milestones)),
|
||||
Name: types.StringValue(rgItem.Name),
|
||||
ResTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ResTypes),
|
||||
RegisterComputes: types.BoolValue(rgItem.RegisterComputes),
|
||||
ResourceLimits: flattenResourceLimits(ctx, &rgItem.ResourceLimits),
|
||||
Secret: types.StringValue(rgItem.Secret),
|
||||
Status: types.StringValue(rgItem.Status),
|
||||
UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.UniqPools),
|
||||
UpdatedBy: types.StringValue(rgItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)),
|
||||
VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VINS),
|
||||
VMS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VMs),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListComputesDataSource flattens data source for rg list computes.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListComputesDataSource(ctx context.Context, state *models.DataSourceRGListComputesModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListComputesDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListComputes, err := utilities.RGListComputesCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list computes", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListComputesDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListComputesModel{
|
||||
RGID: state.RGID,
|
||||
ComputeID: state.ComputeID,
|
||||
Name: state.Name,
|
||||
AccountID: state.AccountID,
|
||||
TechStatus: state.TechStatus,
|
||||
Status: state.Status,
|
||||
IPAddress: state.IPAddress,
|
||||
ExtNetName: state.ExtNetName,
|
||||
ExtNetID: state.ExtNetID,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListComputeModel, 0, len(rgListComputes.Data))
|
||||
for _, compItem := range rgListComputes.Data {
|
||||
item := models.ItemsRGListComputeModel{
|
||||
AccountID: types.Int64Value(int64(compItem.AccountID)),
|
||||
AccountName: types.StringValue(compItem.AccountName),
|
||||
AffinityLabel: types.StringValue(compItem.AffinityLabel),
|
||||
AffinityRules: flattenAffinityRules(ctx, &compItem.AffinityRules),
|
||||
AffinityWeight: types.Int64Value(int64(compItem.AffinityWeight)),
|
||||
AntiAffinityRules: flattenAffinityRules(ctx, &compItem.AntiAffinityRules),
|
||||
CPUs: types.Int64Value(int64(compItem.CPUs)),
|
||||
CreatedBy: types.StringValue(compItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(compItem.CreatedTime)),
|
||||
DeletedBy: types.StringValue(compItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(compItem.DeletedTime)),
|
||||
ID: types.Int64Value(int64(compItem.ID)),
|
||||
Name: types.StringValue(compItem.Name),
|
||||
RAM: types.Int64Value(int64(compItem.RAM)),
|
||||
Registered: types.BoolValue(compItem.Registered),
|
||||
RGID: types.Int64Value(int64(compItem.RGID)),
|
||||
RGName: types.StringValue(compItem.RGName),
|
||||
Status: types.StringValue(compItem.Status),
|
||||
TechStatus: types.StringValue(compItem.TechStatus),
|
||||
TotalDisksSize: types.Int64Value(int64(compItem.TotalDisksSize)),
|
||||
UpdatedBy: types.StringValue(compItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(compItem.UpdatedTime)),
|
||||
UserManaged: types.BoolValue(compItem.UserManaged),
|
||||
VINSConnected: types.Int64Value(int64(compItem.VINSConnected)),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListComputes.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListComputesDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListComputesDataSource")
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenAffinityRules(ctx context.Context, items *rg.ListRules) types.List {
|
||||
tflog.Info(ctx, "Start flattenAffinityRules")
|
||||
tempSlice := make([]types.Object, 0, len(*items))
|
||||
for _, ruleItem := range *items {
|
||||
temp := models.AffinityRuleModel{
|
||||
GUID: types.StringValue(ruleItem.GUID),
|
||||
Key: types.StringValue(ruleItem.Key),
|
||||
Mode: types.StringValue(ruleItem.Mode),
|
||||
Policy: types.StringValue(ruleItem.Policy),
|
||||
Topology: types.StringValue(ruleItem.Topology),
|
||||
Value: types.StringValue(ruleItem.Value),
|
||||
}
|
||||
|
||||
obj, diags := types.ObjectValueFrom(ctx, models.ItemAffinityRule, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenAffinityRules struct to obj", diags))
|
||||
}
|
||||
tempSlice = append(tempSlice, obj)
|
||||
}
|
||||
|
||||
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemAffinityRule}, tempSlice)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenAffinityRules", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenAffinityRules")
|
||||
return res
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListDeletedDataSource flattens data source for rg list deleted.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListDeletedDataSource(ctx context.Context, state *models.DataSourceRGListDeletedModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListDeletedDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgList, err := utilities.RGListDeletedCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list deleted", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDeletedDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListDeletedModel{
|
||||
ById: state.ById,
|
||||
Name: state.Name,
|
||||
AccountId: state.AccountId,
|
||||
AccountName: state.AccountName,
|
||||
CreatedAfter: state.CreatedAfter,
|
||||
CreatedBefore: state.CreatedBefore,
|
||||
LockStatus: state.LockStatus,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListDeletedModel, 0, len(rgList.Data))
|
||||
for _, rgItem := range rgList.Data {
|
||||
item := models.ItemsRGListDeletedModel{
|
||||
AccountACL: flattenACL(ctx, &rgItem.ACL),
|
||||
AccountID: types.Int64Value(int64(rgItem.AccountID)),
|
||||
AccountName: types.StringValue(rgItem.AccountName),
|
||||
ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ComputeFeatures),
|
||||
CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(rgItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)),
|
||||
DefNetID: types.Int64Value(rgItem.DefNetID),
|
||||
DefNetType: types.StringValue(rgItem.DefNetType),
|
||||
DeletedBy: types.StringValue(rgItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)),
|
||||
Description: types.StringValue(rgItem.Description),
|
||||
Dirty: types.BoolValue(rgItem.Dirty),
|
||||
GID: types.Int64Value(int64(rgItem.GID)),
|
||||
GUID: types.Int64Value(int64(rgItem.GUID)),
|
||||
RGID: types.Int64Value(int64(rgItem.ID)),
|
||||
LockStatus: types.StringValue(rgItem.LockStatus),
|
||||
Milestones: types.Int64Value(int64(rgItem.Milestones)),
|
||||
Name: types.StringValue(rgItem.Name),
|
||||
ResTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ResTypes),
|
||||
RegisterComputes: types.BoolValue(rgItem.RegisterComputes),
|
||||
ResourceLimits: flattenResourceLimits(ctx, &rgItem.ResourceLimits),
|
||||
Secret: types.StringValue(rgItem.Secret),
|
||||
Status: types.StringValue(rgItem.Status),
|
||||
UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.UniqPools),
|
||||
UpdatedBy: types.StringValue(rgItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)),
|
||||
VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VINS),
|
||||
VMS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VMs),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDeletedDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListDeletedDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,172 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListLBDataSource flattens data source for rg list lb.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListLBDataSource(ctx context.Context, state *models.DataSourceRGListLBModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListLBDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListLB, err := utilities.RGListLBCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list lb", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListLBDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListLBModel{
|
||||
RGID: state.RGID,
|
||||
ByID: state.ByID,
|
||||
Name: state.Name,
|
||||
TechStatus: state.TechStatus,
|
||||
Status: state.Status,
|
||||
FrontIP: state.FrontIP,
|
||||
BackIP: state.BackIP,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListLBModel, 0, len(rgListLB.Data))
|
||||
for _, lbItem := range rgListLB.Data {
|
||||
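// The load balancer ACL has no dedicated schema in this model, so it is flattened
// to its raw JSON encoding and stored as a plain string attribute; note that the
// marshal error is discarded here.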
acl, _ := json.Marshal(lbItem.ACL)
|
||||
item := models.ItemsRGListLBModel{
|
||||
HAMode: types.BoolValue(lbItem.HAMode),
|
||||
ACL: types.StringValue(string(acl)),
|
||||
CreatedBy: types.StringValue(lbItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)),
|
||||
DeletedBy: types.StringValue(lbItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)),
|
||||
Description: types.StringValue(lbItem.Description),
|
||||
DPAPIUser: types.StringValue(lbItem.DPAPIUser),
|
||||
ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)),
|
||||
GID: types.Int64Value(int64(lbItem.GID)),
|
||||
GUID: types.Int64Value(int64(lbItem.GUID)),
|
||||
ID: types.Int64Value(int64(lbItem.ID)),
|
||||
ImageID: types.Int64Value(int64(lbItem.ImageID)),
|
||||
Milestones: types.Int64Value(int64(lbItem.Milestones)),
|
||||
Name: types.StringValue(lbItem.Name),
|
||||
PrimaryNode: models.RecordNodeModel{
|
||||
BackendIP: types.StringValue(lbItem.PrimaryNode.BackendIP),
|
||||
ComputeID: types.Int64Value(int64(lbItem.PrimaryNode.ComputeID)),
|
||||
FrontendIP: types.StringValue(lbItem.PrimaryNode.FrontendIP),
|
||||
GUID: types.StringValue(lbItem.PrimaryNode.GUID),
|
||||
MGMTIP: types.StringValue(lbItem.PrimaryNode.MGMTIP),
|
||||
NetworkID: types.Int64Value(int64(lbItem.PrimaryNode.NetworkID)),
|
||||
},
|
||||
RGName: types.StringValue(lbItem.RGName),
|
||||
SecondaryNode: models.RecordNodeModel{
|
||||
BackendIP: types.StringValue(lbItem.SecondaryNode.BackendIP),
|
||||
ComputeID: types.Int64Value(int64(lbItem.SecondaryNode.ComputeID)),
|
||||
FrontendIP: types.StringValue(lbItem.SecondaryNode.FrontendIP),
|
||||
GUID: types.StringValue(lbItem.SecondaryNode.GUID),
|
||||
MGMTIP: types.StringValue(lbItem.SecondaryNode.MGMTIP),
|
||||
NetworkID: types.Int64Value(int64(lbItem.SecondaryNode.NetworkID)),
|
||||
},
|
||||
Status: types.StringValue(lbItem.Status),
|
||||
TechStatus: types.StringValue(lbItem.TechStatus),
|
||||
UpdatedBy: types.StringValue(lbItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)),
|
||||
VINSID: types.Int64Value(int64(lbItem.VINSID)),
|
||||
}
|
||||
|
||||
// flatten Backends
|
||||
backends := make([]models.ItemBackendModel, 0, len(lbItem.Backends))
|
||||
for _, backendItem := range lbItem.Backends {
|
||||
be := models.ItemBackendModel{
|
||||
Algorithm: types.StringValue(backendItem.Algorithm),
|
||||
GUID: types.StringValue(backendItem.GUID),
|
||||
Name: types.StringValue(backendItem.Name),
|
||||
ServerDefaultSettings: models.RecordServerSettingsModel{
|
||||
Inter: types.Int64Value(int64(backendItem.ServerDefaultSettings.Inter)),
|
||||
GUID: types.StringValue(backendItem.ServerDefaultSettings.GUID),
|
||||
DownInter: types.Int64Value(int64(backendItem.ServerDefaultSettings.DownInter)),
|
||||
Rise: types.Int64Value(int64(backendItem.ServerDefaultSettings.Rise)),
|
||||
Fall: types.Int64Value(int64(backendItem.ServerDefaultSettings.Fall)),
|
||||
SlowStart: types.Int64Value(int64(backendItem.ServerDefaultSettings.SlowStart)),
|
||||
MaxConn: types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxConn)),
|
||||
MaxQueue: types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxQueue)),
|
||||
Weight: types.Int64Value(int64(backendItem.ServerDefaultSettings.Weight)),
|
||||
},
|
||||
}
|
||||
|
||||
servers := make([]models.ItemServerModel, 0, len(backendItem.Servers))
|
||||
for _, server := range backendItem.Servers {
|
||||
s := models.ItemServerModel{
|
||||
Address: types.StringValue(server.Address),
|
||||
Check: types.StringValue(server.Check),
|
||||
GUID: types.StringValue(server.GUID),
|
||||
Name: types.StringValue(server.Name),
|
||||
Port: types.Int64Value(int64(server.Port)),
|
||||
ServerSettings: models.RecordServerSettingsModel{
|
||||
Inter: types.Int64Value(int64(server.ServerSettings.Inter)),
|
||||
GUID: types.StringValue(server.ServerSettings.GUID),
|
||||
DownInter: types.Int64Value(int64(server.ServerSettings.DownInter)),
|
||||
Rise: types.Int64Value(int64(server.ServerSettings.Rise)),
|
||||
Fall: types.Int64Value(int64(server.ServerSettings.Fall)),
|
||||
SlowStart: types.Int64Value(int64(server.ServerSettings.SlowStart)),
|
||||
MaxConn: types.Int64Value(int64(server.ServerSettings.MaxConn)),
|
||||
MaxQueue: types.Int64Value(int64(server.ServerSettings.MaxQueue)),
|
||||
Weight: types.Int64Value(int64(server.ServerSettings.Weight)),
|
||||
},
|
||||
}
|
||||
servers = append(servers, s)
|
||||
}
|
||||
be.Servers = servers
|
||||
backends = append(backends, be)
|
||||
}
|
||||
item.Backends = backends
|
||||
|
||||
// flatten Frontends
|
||||
frontends := make([]models.ItemFrontendModel, 0, len(lbItem.Frontends))
|
||||
for _, frontendItem := range lbItem.Frontends {
|
||||
fr := models.ItemFrontendModel{
|
||||
Backend: types.StringValue(frontendItem.Backend),
|
||||
GUID: types.StringValue(frontendItem.GUID),
|
||||
Name: types.StringValue(frontendItem.Name),
|
||||
}
|
||||
bindings := make([]models.ItemBindingModel, 0, len(frontendItem.Bindings))
|
||||
for _, bindingItem := range frontendItem.Bindings {
b := models.ItemBindingModel{
Address: types.StringValue(bindingItem.Address),
GUID: types.StringValue(bindingItem.GUID),
Name: types.StringValue(bindingItem.Name),
Port: types.Int64Value(int64(bindingItem.Port)),
}
|
||||
bindings = append(bindings, b)
|
||||
}
|
||||
fr.Bindings = bindings
|
||||
frontends = append(frontends, fr)
|
||||
}
|
||||
item.Frontends = frontends
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListLB.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListLBDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListLBDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListPFWDataSource flattens data source for rg list pfw.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListPFWDataSource(ctx context.Context, state *models.DataSourceRGListPFWModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListPFWDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListPFW, err := utilities.RGListPFWCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list pfw", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListPFWDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListPFWModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListPFWModel, 0, len(rgListPFW.Data))
|
||||
for _, pfwItem := range rgListPFW.Data {
|
||||
item := models.ItemsRGListPFWModel{
|
||||
PublicPortEnd: types.Int64Value(int64(pfwItem.PublicPortEnd)),
|
||||
PublicPortStart: types.Int64Value(int64(pfwItem.PublicPortStart)),
|
||||
VMID: types.Int64Value(int64(pfwItem.VMID)),
|
||||
VMIP: types.StringValue(pfwItem.VMIP),
|
||||
VMName: types.StringValue(pfwItem.VMName),
|
||||
VMPort: types.Int64Value(int64(pfwItem.VMPort)),
|
||||
VINSID: types.Int64Value(int64(pfwItem.VINSID)),
|
||||
VINSName: types.StringValue(pfwItem.VINSName),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListPFW.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListPFWDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListPFWDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,80 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListVinsDataSource flattens data source for rg list vins.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListVinsDataSource(ctx context.Context, state *models.DataSourceRGListVinsModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListVinsDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListVins, err := utilities.RGListVinsCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list vins", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListVinsDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListVinsModel{
|
||||
RGID: state.RGID,
|
||||
Name: state.Name,
|
||||
AccountID: state.AccountID,
|
||||
ExtIP: state.ExtIP,
|
||||
VINSID: state.VINSID,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListVinsModel, 0, len(rgListVins.Data))
|
||||
for _, vinsItem := range rgListVins.Data {
|
||||
item := models.ItemsRGListVinsModel{
|
||||
AccountID: types.Int64Value(int64(vinsItem.AccountID)),
|
||||
AccountName: types.StringValue(vinsItem.AccountName),
|
||||
Computes: types.Int64Value(int64(vinsItem.Computes)),
|
||||
CreatedBy: types.StringValue(vinsItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(vinsItem.CreatedTime)),
|
||||
DeletedBy: types.StringValue(vinsItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(vinsItem.DeletedTime)),
|
||||
ExternalIP: types.StringValue(vinsItem.ExternalIP),
|
||||
ExtnetID: types.Int64Value(int64(vinsItem.ExtnetId)),
|
||||
FreeIPs: types.Int64Value(int64(vinsItem.FreeIPs)),
|
||||
ID: types.Int64Value(int64(vinsItem.ID)),
|
||||
Name: types.StringValue(vinsItem.Name),
|
||||
Network: types.StringValue(vinsItem.Network),
|
||||
PriVNFDevID: types.Int64Value(int64(vinsItem.PriVNFDevID)),
|
||||
RGID: types.Int64Value(int64(vinsItem.RGID)),
|
||||
RGName: types.StringValue(vinsItem.RGName),
|
||||
Status: types.StringValue(vinsItem.Status),
|
||||
UpdatedBy: types.StringValue(vinsItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(vinsItem.UpdatedTime)),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListVins.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListVinsDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListVinsDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGResourceConsumptionListDataSource flattens data source for rg resource consumption list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGResourceConsumptionListDataSource(ctx context.Context, state *models.DataSourceRGResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGResourceConsumptionListDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
resConsList, err := utilities.RGResourceConsumptionListCheckPresence(ctx, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group resource consumption list", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGResourceConsumptionListModel{
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemResourceConsumptionModel, 0, len(resConsList.Data))
|
||||
for _, resConsItem := range resConsList.Data {
|
||||
item := models.ItemResourceConsumptionModel{
|
||||
RGID: types.Int64Value(int64(resConsItem.RGID)),
|
||||
Consumed: flattenResource(ctx, &resConsItem.Consumed),
|
||||
Reserved: flattenResource(ctx, &resConsItem.Reserved),
|
||||
ResourceLimits: flattenResourceLimits(ctx, &resConsItem.ResourceLimits),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(resConsList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGResourceConsumptionListDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
|
||||
)
|
||||
|
||||
// RGUsageDataSource flattens data source for rg usage.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGUsageDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
usageInfo, err := utilities.RGUsageCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group usage", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGUsageDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGUsageModel{
|
||||
RGID: state.RGID,
|
||||
Reason: state.Reason,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
|
||||
CPU: types.Int64Value(int64(usageInfo.CPU)),
|
||||
DiskSize: types.Int64Value(int64(usageInfo.DiskSize)),
|
||||
DiskSizeMax: types.Int64Value(int64(usageInfo.DiskSizeMax)),
|
||||
ExtIPs: types.Int64Value(int64(usageInfo.ExtIPs)),
|
||||
ExtTraffic: types.Int64Value(int64(usageInfo.ExtTraffic)),
|
||||
GPU: types.Int64Value(int64(usageInfo.GPU)),
|
||||
RAM: types.Int64Value(int64(usageInfo.RAM)),
|
||||
SEPs: flattenSEPs(ctx, usageInfo.SEPs),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGUsageDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGUsageDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,84 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ACL types.List `tfsdk:"acl"`
ComputeFeatures types.List `tfsdk:"compute_features"`
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DefNetID types.Int64 `tfsdk:"def_net_id"`
DefNetType types.String `tfsdk:"def_net_type"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
Dirty types.Bool `tfsdk:"dirty"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
Id types.String `tfsdk:"id"`
LockStatus types.String `tfsdk:"lock_status"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
RegisterComputes types.Bool `tfsdk:"register_computes"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
ResourceTypes types.List `tfsdk:"resource_types"`
Secret types.String `tfsdk:"secret"`
Status types.String `tfsdk:"status"`
UniqPools types.List `tfsdk:"uniq_pools"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
VINS types.List `tfsdk:"vins"`
Computes types.List `tfsdk:"computes"`
}

type ResourceLimitsModel struct {
CUC types.Float64 `tfsdk:"cu_c"`
CUD types.Float64 `tfsdk:"cu_d"`
CUDM types.Float64 `tfsdk:"cu_dm"`
CUI types.Float64 `tfsdk:"cu_i"`
CUM types.Float64 `tfsdk:"cu_m"`
CUNP types.Float64 `tfsdk:"cu_np"`
GPUUnits types.Float64 `tfsdk:"gpu_units"`
}

var ItemResourseModel = map[string]attr.Type{
"cu_c": types.Float64Type,
"cu_d": types.Float64Type,
"cu_dm": types.Float64Type,
"cu_i": types.Float64Type,
"cu_m": types.Float64Type,
"cu_np": types.Float64Type,
"gpu_units": types.Float64Type,
}

type ItemACLModel struct {
Explicit types.Bool `tfsdk:"explicit"`
GUID types.String `tfsdk:"guid"`
Right types.String `tfsdk:"right"`
Status types.String `tfsdk:"status"`
Type types.String `tfsdk:"type"`
UserGroupID types.String `tfsdk:"user_group_id"`
}

var ItemACL = map[string]attr.Type{
"explicit": types.BoolType,
"guid": types.StringType,
"right": types.StringType,
"status": types.StringType,
"type": types.StringType,
"user_group_id": types.StringType,
}
@@ -0,0 +1,29 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGAffinityGroupComputesModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`
AffinityGroup types.String `tfsdk:"affinity_group"`

// request optional fields
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemAffinityGroupComputeModel `tfsdk:"items"`
}

type ItemAffinityGroupComputeModel struct {
ComputeID types.Int64 `tfsdk:"compute_id"`
OtherNode types.List `tfsdk:"other_node"`
OtherNodeIndirect types.List `tfsdk:"other_node_indirect"`
OtherNodeIndirectSoft types.List `tfsdk:"other_node_indirect_soft"`
OtherNodeSoft types.List `tfsdk:"other_node_soft"`
SameNode types.List `tfsdk:"same_node"`
SameNodeSoft types.List `tfsdk:"same_node_soft"`
}
@@ -0,0 +1,19 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGAffinityGroupsGetModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`
AffinityGroup types.String `tfsdk:"affinity_group"`

// request optional fields
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Ids types.List `tfsdk:"ids"`
}
@@ -0,0 +1,31 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGAffinityGroupsListModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`

// request optional fields
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
AffinityGroups []ItemAffinityGroupModel `tfsdk:"affinity_groups"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemAffinityGroupModel struct {
Label types.String `tfsdk:"label"`
Ids []ItemIDModel `tfsdk:"ids"`
}

type ItemIDModel struct {
Id types.Int64 `tfsdk:"id"`
NodeId types.Int64 `tfsdk:"node_id"`
}
@@ -0,0 +1,24 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGAuditsModel struct {
// request field
RGID types.Int64 `tfsdk:"rg_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGAuditModel `tfsdk:"items"`
}

type ItemsRGAuditModel struct {
Call types.String `tfsdk:"call"`
ResponseTime types.Float64 `tfsdk:"responsetime"`
StatusCode types.Int64 `tfsdk:"statuscode"`
Timestamp types.Float64 `tfsdk:"timestamp"`
User types.String `tfsdk:"user"`
}
@@ -0,0 +1,55 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGGetResourceConsumptionModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Consumed types.Object `tfsdk:"consumed"`
Reserved types.Object `tfsdk:"reserved"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
}

type ResourceModel struct {
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Float64 `tfsdk:"disk_size"`
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs types.List `tfsdk:"seps"`
}

type SEPsModel struct {
SepID types.String `tfsdk:"sep_id"`
DataName types.String `tfsdk:"data_name"`
DiskSize types.Float64 `tfsdk:"disk_size"`
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
}

var ItemSEPs = map[string]attr.Type{
"sep_id": types.StringType,
"data_name": types.StringType,
"disk_size": types.Float64Type,
"disk_size_max": types.Float64Type,
}

var ItemResource = map[string]attr.Type{
"cpu": types.Int64Type,
"disk_size": types.Float64Type,
"disk_size_max": types.Float64Type,
"extips": types.Int64Type,
"exttraffic": types.Int64Type,
"gpu": types.Int64Type,
"ram": types.Int64Type,
"seps": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemSEPs}},
}
@@ -0,0 +1,61 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListModel struct {
// request optional fields
ById types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountId types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
CreatedAfter types.Int64 `tfsdk:"created_after"`
CreatedBefore types.Int64 `tfsdk:"created_before"`
Status types.String `tfsdk:"status"`
LockStatus types.String `tfsdk:"lock_status"`
IncludeDeleted types.Bool `tfsdk:"includedeleted"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListModel struct {
AccountACL types.List `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ComputeFeatures types.List `tfsdk:"compute_features"`
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DefNetID types.Int64 `tfsdk:"def_net_id"`
DefNetType types.String `tfsdk:"def_net_type"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
Dirty types.Bool `tfsdk:"dirty"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
RGID types.Int64 `tfsdk:"rg_id"`
LockStatus types.String `tfsdk:"lock_status"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
RegisterComputes types.Bool `tfsdk:"register_computes"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
ResTypes types.List `tfsdk:"resource_types"`
Secret types.String `tfsdk:"secret"`
Status types.String `tfsdk:"status"`
UniqPools types.List `tfsdk:"uniq_pools"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
VINS types.List `tfsdk:"vins"`
VMS types.List `tfsdk:"vms"`
}
@@ -0,0 +1,76 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListComputesModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`

// request optional fields
ComputeID types.Int64 `tfsdk:"compute_id"`
Name types.String `tfsdk:"name"`
AccountID types.Int64 `tfsdk:"account_id"`
TechStatus types.String `tfsdk:"tech_status"`
Status types.String `tfsdk:"status"`
IPAddress types.String `tfsdk:"ip_address"`
ExtNetName types.String `tfsdk:"extnet_name"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListComputeModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListComputeModel struct {
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
AffinityLabel types.String `tfsdk:"affinity_label"`
AffinityRules types.List `tfsdk:"affinity_rules"`
AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
AntiAffinityRules types.List `tfsdk:"antiaffinity_rules"`
CPUs types.Int64 `tfsdk:"cpus"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
ID types.Int64 `tfsdk:"id"`
Name types.String `tfsdk:"name"`
RAM types.Int64 `tfsdk:"ram"`
Registered types.Bool `tfsdk:"registered"`
RGID types.Int64 `tfsdk:"rg_id"`
RGName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
TotalDisksSize types.Int64 `tfsdk:"total_disks_size"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
UserManaged types.Bool `tfsdk:"user_managed"`
VINSConnected types.Int64 `tfsdk:"vins_connected"`
}

type AffinityRuleModel struct {
GUID types.String `tfsdk:"guid"`
Key types.String `tfsdk:"key"`
Mode types.String `tfsdk:"mode"`
Policy types.String `tfsdk:"policy"`
Topology types.String `tfsdk:"topology"`
Value types.String `tfsdk:"value"`
}

var ItemAffinityRule = map[string]attr.Type{
"guid": types.StringType,
"key": types.StringType,
"mode": types.StringType,
"policy": types.StringType,
"topology": types.StringType,
"value": types.StringType,
}
@@ -0,0 +1,59 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListDeletedModel struct {
// request optional fields
ById types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountId types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
CreatedAfter types.Int64 `tfsdk:"created_after"`
CreatedBefore types.Int64 `tfsdk:"created_before"`
LockStatus types.String `tfsdk:"lock_status"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListDeletedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListDeletedModel struct {
AccountACL types.List `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ComputeFeatures types.List `tfsdk:"compute_features"`
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DefNetID types.Int64 `tfsdk:"def_net_id"`
DefNetType types.String `tfsdk:"def_net_type"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
Dirty types.Bool `tfsdk:"dirty"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
RGID types.Int64 `tfsdk:"rg_id"`
LockStatus types.String `tfsdk:"lock_status"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
RegisterComputes types.Bool `tfsdk:"register_computes"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
ResTypes types.List `tfsdk:"resource_types"`
Secret types.String `tfsdk:"secret"`
Status types.String `tfsdk:"status"`
UniqPools types.List `tfsdk:"uniq_pools"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
VINS types.List `tfsdk:"vins"`
VMS types.List `tfsdk:"vms"`
}
@@ -0,0 +1,108 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListLBModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`

// request optional fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
TechStatus types.String `tfsdk:"tech_status"`
Status types.String `tfsdk:"status"`
FrontIP types.String `tfsdk:"front_ip"`
BackIP types.String `tfsdk:"back_ip"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListLBModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListLBModel struct {
HAMode types.Bool `tfsdk:"ha_mode"`
ACL types.String `tfsdk:"acl"`
Backends []ItemBackendModel `tfsdk:"backends"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DPAPIUser types.String `tfsdk:"dp_api_user"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
Frontends []ItemFrontendModel `tfsdk:"frontends"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.Int64 `tfsdk:"id"`
ImageID types.Int64 `tfsdk:"image_id"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
PrimaryNode RecordNodeModel `tfsdk:"primary_node"`
RGName types.String `tfsdk:"rg_name"`
SecondaryNode RecordNodeModel `tfsdk:"secondary_node"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
VINSID types.Int64 `tfsdk:"vins_id"`
}

type ItemFrontendModel struct {
Backend types.String `tfsdk:"backend"`
Bindings []ItemBindingModel `tfsdk:"bindings"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
}

type ItemBindingModel struct {
Address types.String `tfsdk:"address"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
Port types.Int64 `tfsdk:"port"`
}

type RecordNodeModel struct {
BackendIP types.String `tfsdk:"backend_ip"`
ComputeID types.Int64 `tfsdk:"compute_id"`
FrontendIP types.String `tfsdk:"frontend_ip"`
GUID types.String `tfsdk:"guid"`
MGMTIP types.String `tfsdk:"mgmt_ip"`
NetworkID types.Int64 `tfsdk:"network_id"`
}

type ItemBackendModel struct {
Algorithm types.String `tfsdk:"algorithm"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
ServerDefaultSettings RecordServerSettingsModel `tfsdk:"server_default_settings"`
Servers []ItemServerModel `tfsdk:"servers"`
}

type RecordServerSettingsModel struct {
Inter types.Int64 `tfsdk:"inter"`
GUID types.String `tfsdk:"guid"`
DownInter types.Int64 `tfsdk:"down_inter"`
Rise types.Int64 `tfsdk:"rise"`
Fall types.Int64 `tfsdk:"fall"`
SlowStart types.Int64 `tfsdk:"slow_start"`
MaxConn types.Int64 `tfsdk:"max_conn"`
MaxQueue types.Int64 `tfsdk:"max_queue"`
Weight types.Int64 `tfsdk:"weight"`
}

type ItemServerModel struct {
Address types.String `tfsdk:"address"`
Check types.String `tfsdk:"check"`
GUID types.String `tfsdk:"guid"`
Name types.String `tfsdk:"name"`
Port types.Int64 `tfsdk:"port"`
ServerSettings RecordServerSettingsModel `tfsdk:"server_settings"`
}
@@ -0,0 +1,28 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListPFWModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"` // required
Timeouts timeouts.Value `tfsdk:"timeouts"` // optional

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListPFWModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListPFWModel struct {
PublicPortEnd types.Int64 `tfsdk:"public_port_end"`
PublicPortStart types.Int64 `tfsdk:"public_port_start"`
VMID types.Int64 `tfsdk:"vm_id"`
VMIP types.String `tfsdk:"vm_ip"`
VMName types.String `tfsdk:"vm_name"`
VMPort types.Int64 `tfsdk:"vm_port"`
VINSID types.Int64 `tfsdk:"vins_id"`
VINSName types.String `tfsdk:"vins_name"`
}
@@ -0,0 +1,48 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGListVinsModel struct {
// request required fields
RGID types.Int64 `tfsdk:"rg_id"`

// request optional fields
Name types.String `tfsdk:"name"`
AccountID types.Int64 `tfsdk:"account_id"`
ExtIP types.String `tfsdk:"ext_ip"`
VINSID types.Int64 `tfsdk:"vins_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemsRGListVinsModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemsRGListVinsModel struct {
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
Computes types.Int64 `tfsdk:"computes"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
ExternalIP types.String `tfsdk:"external_ip"`
ExtnetID types.Int64 `tfsdk:"extnet_id"`
FreeIPs types.Int64 `tfsdk:"free_ips"`
ID types.Int64 `tfsdk:"id"`
Name types.String `tfsdk:"name"`
Network types.String `tfsdk:"network"`
PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"`
RGID types.Int64 `tfsdk:"rg_id"`
RGName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
}
@@ -0,0 +1,23 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGResourceConsumptionListModel struct {
// request fields
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
Items []ItemResourceConsumptionModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

type ItemResourceConsumptionModel struct {
RGID types.Int64 `tfsdk:"rg_id"`
Consumed types.Object `tfsdk:"consumed"`
Reserved types.Object `tfsdk:"reserved"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
}
@@ -0,0 +1,24 @@
package models

import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)

type DataSourceRGUsageModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

// response fields
Id types.String `tfsdk:"id"`
CPU types.Int64 `tfsdk:"cpu"`
DiskSize types.Int64 `tfsdk:"disk_size"`
DiskSizeMax types.Int64 `tfsdk:"disk_size_max"`
ExtIPs types.Int64 `tfsdk:"extips"`
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
GPU types.Int64 `tfsdk:"gpu"`
RAM types.Int64 `tfsdk:"ram"`
SEPs types.List `tfsdk:"seps"`
}
164
internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go
Normal file
164
internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go
Normal file
@@ -0,0 +1,164 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceRG() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "resource group id",
},

// optional attributes
"reason": schema.StringAttribute{
Optional: true,
Description: "reason for request",
},

//computed attributes
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"explicit": schema.BoolAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"right": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"user_group_id": schema.StringAttribute{
Computed: true,
},
},
},
},
"compute_features": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"cpu_allocation_parameter": schema.StringAttribute{
Computed: true,
},
"cpu_allocation_ratio": schema.Float64Attribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"def_net_id": schema.Int64Attribute{
Computed: true,
},
"def_net_type": schema.StringAttribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"dirty": schema.BoolAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"id": schema.StringAttribute{
Computed: true,
},
"lock_status": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"register_computes": schema.BoolAttribute{
Computed: true,
},
"resource_limits": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Computed: true,
},
"cu_d": schema.Float64Attribute{
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Computed: true,
},
"cu_i": schema.Float64Attribute{
Computed: true,
},
"cu_m": schema.Float64Attribute{
Computed: true,
},
"cu_np": schema.Float64Attribute{
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Computed: true,
},
},
},
"resource_types": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"secret": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"vins": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"computes": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"uniq_pools": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
}
}
@@ -0,0 +1,59 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceRGAffinityGroupComputes() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "find by rg id",
},
"affinity_group": schema.StringAttribute{
Required: true,
Description: "Affinity group label",
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.Int64Attribute{
Computed: true,
},
"other_node": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"other_node_indirect": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"other_node_indirect_soft": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"other_node_soft": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"same_node": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"same_node_soft": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
},
},
},
}
}
@@ -0,0 +1,29 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceRGAffinityGroupsGet() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "find by rg id",
},
"affinity_group": schema.StringAttribute{
Required: true,
Description: "Affinity group label",
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"ids": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
}
}
@@ -0,0 +1,57 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceRGAffinityGroupsList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "find by rg id",
},

// optional attributes
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "size number",
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"affinity_groups": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"label": schema.StringAttribute{
Computed: true,
},

"ids": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"id": schema.Int64Attribute{
Computed: true,
},
"node_id": schema.Int64Attribute{
Computed: true,
},
},
},
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}
@@ -0,0 +1,41 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceRGAudits() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"call": schema.StringAttribute{
Computed: true,
},
"responsetime": schema.Float64Attribute{
Computed: true,
},
"statuscode": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Float64Attribute{
Computed: true,
},
"user": schema.StringAttribute{
Computed: true,
},
},
},
},
}
}
@@ -0,0 +1,136 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceRGGetResourceConsumption() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "find by rg id",
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"consumed": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"reserved": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cpu": schema.Int64Attribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
"extips": schema.Int64Attribute{
Computed: true,
},
"exttraffic": schema.Int64Attribute{
Computed: true,
},
"gpu": schema.Int64Attribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"seps": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"sep_id": schema.StringAttribute{
Computed: true,
},
"data_name": schema.StringAttribute{
Computed: true,
},
"disk_size": schema.Float64Attribute{
Computed: true,
},
"disk_size_max": schema.Float64Attribute{
Computed: true,
},
},
},
},
},
},
"resource_limits": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Computed: true,
},
"cu_d": schema.Float64Attribute{
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Computed: true,
},
"cu_i": schema.Float64Attribute{
Computed: true,
},
"cu_m": schema.Float64Attribute{
Computed: true,
},
"cu_np": schema.Float64Attribute{
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Computed: true,
},
},
},
}
}
@@ -0,0 +1,215 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceRGList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"by_id": schema.Int64Attribute{
Optional: true,
Description: "find by id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"account_id": schema.Int64Attribute{
Optional: true,
Description: "find by account id",
},
"account_name": schema.StringAttribute{
Optional: true,
Description: "find by account name",
},
"created_after": schema.Int64Attribute{
Optional: true,
Description: "find by created after time (unix timestamp)",
},
"created_before": schema.Int64Attribute{
Optional: true,
Description: "find by created before time (unix timestamp)",
},
"status": schema.StringAttribute{
Optional: true,
Description: "find by status",
},
"lock_status": schema.StringAttribute{
Optional: true,
Description: "find by lock status",
},
"includedeleted": schema.BoolAttribute{
Optional: true,
Description: "included deleted resource groups. If using field 'status', then includedeleted will be ignored",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "size number",
},

//computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"acl": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"explicit": schema.BoolAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"right": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"user_group_id": schema.StringAttribute{
Computed: true,
},
},
},
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"compute_features": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"def_net_id": schema.Int64Attribute{
Computed: true,
},
"def_net_type": schema.StringAttribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"dirty": schema.BoolAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"rg_id": schema.Int64Attribute{
Computed: true,
},
"lock_status": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"register_computes": schema.BoolAttribute{
Computed: true,
},
"resource_limits": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Computed: true,
},
"cu_d": schema.Float64Attribute{
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Computed: true,
},
"cu_i": schema.Float64Attribute{
Computed: true,
},
"cu_m": schema.Float64Attribute{
Computed: true,
},
"cu_np": schema.Float64Attribute{
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Computed: true,
},
},
},
"secret": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"vins": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"vms": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"resource_types": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"cpu_allocation_parameter": schema.StringAttribute{
Computed: true,
},
"cpu_allocation_ratio": schema.Float64Attribute{
Computed: true,
},
"uniq_pools": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}
@@ -0,0 +1,192 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceRGListComputes() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "ID of the RG",
},

// optional attributes
"compute_id": schema.Int64Attribute{
Optional: true,
Description: "find by id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"account_id": schema.Int64Attribute{
Optional: true,
Description: "find by account id",
},
"tech_status": schema.StringAttribute{
Optional: true,
Description: "find by tech status",
},
"status": schema.StringAttribute{
Optional: true,
Description: "find by status",
},
"ip_address": schema.StringAttribute{
Optional: true,
Description: "find by ip address",
},
"extnet_name": schema.StringAttribute{
Optional: true,
Description: "find by external network name",
},
"extnet_id": schema.Int64Attribute{
Optional: true,
Description: "find by external network id",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "size number",
},

//computed attributes
"id": schema.StringAttribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"affinity_label": schema.StringAttribute{
Computed: true,
},
"affinity_rules": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"key": schema.StringAttribute{
Computed: true,
},
"mode": schema.StringAttribute{
Computed: true,
},
"policy": schema.StringAttribute{
Computed: true,
},
"topology": schema.StringAttribute{
Computed: true,
},
"value": schema.StringAttribute{
Computed: true,
},
},
},
},
"affinity_weight": schema.Int64Attribute{
Computed: true,
},
"antiaffinity_rules": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"key": schema.StringAttribute{
Computed: true,
},
"mode": schema.StringAttribute{
Computed: true,
},
"policy": schema.StringAttribute{
Computed: true,
},
"topology": schema.StringAttribute{
Computed: true,
},
"value": schema.StringAttribute{
Computed: true,
},
},
},
},
"cpus": schema.Int64Attribute{
Computed: true,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"id": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"ram": schema.Int64Attribute{
Computed: true,
},
"registered": schema.BoolAttribute{
Computed: true,
},
"rg_id": schema.Int64Attribute{
Computed: true,
},
"rg_name": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"total_disks_size": schema.Int64Attribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"user_managed": schema.BoolAttribute{
Computed: true,
},
"vins_connected": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}
@@ -0,0 +1,207 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceRGListDeleted() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"by_id": schema.Int64Attribute{
Optional: true,
Description: "find by id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"account_id": schema.Int64Attribute{
Optional: true,
Description: "find by account id",
},
"account_name": schema.StringAttribute{
Optional: true,
Description: "find by account name",
},
"created_after": schema.Int64Attribute{
Optional: true,
Description: "find by created after time (unix timestamp)",
},
"created_before": schema.Int64Attribute{
Optional: true,
Description: "find by created before time (unix timestamp)",
},
"lock_status": schema.StringAttribute{
Optional: true,
Description: "find by lock status",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "size number",
},

//computed attributes
"id": schema.StringAttribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"acl": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"explicit": schema.BoolAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"right": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"user_group_id": schema.StringAttribute{
Computed: true,
},
},
},
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"compute_features": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"def_net_id": schema.Int64Attribute{
Computed: true,
},
"def_net_type": schema.StringAttribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"dirty": schema.BoolAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"rg_id": schema.Int64Attribute{
Computed: true,
},
"lock_status": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"register_computes": schema.BoolAttribute{
Computed: true,
},
"resource_limits": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"cu_c": schema.Float64Attribute{
Computed: true,
},
"cu_d": schema.Float64Attribute{
Computed: true,
},
"cu_dm": schema.Float64Attribute{
Computed: true,
},
"cu_i": schema.Float64Attribute{
Computed: true,
},
"cu_m": schema.Float64Attribute{
Computed: true,
},
"cu_np": schema.Float64Attribute{
Computed: true,
},
"gpu_units": schema.Float64Attribute{
Computed: true,
},
},
},
"secret": schema.StringAttribute{
Computed: true,
},
"status": schema.StringAttribute{
Computed: true,
},
"updated_by": schema.StringAttribute{
Computed: true,
},
"updated_time": schema.Int64Attribute{
Computed: true,
},
"vins": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"vms": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"resource_types": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"cpu_allocation_parameter": schema.StringAttribute{
Computed: true,
},
"cpu_allocation_ratio": schema.Float64Attribute{
Computed: true,
},
"uniq_pools": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}
@@ -0,0 +1,314 @@
package schemas

import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceRGListLB() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"rg_id": schema.Int64Attribute{
Required: true,
Description: "ID of the RG",
},

// optional attributes
"by_id": schema.Int64Attribute{
Optional: true,
Description: "find by id",
},
"name": schema.StringAttribute{
Optional: true,
Description: "find by name",
},
"tech_status": schema.StringAttribute{
Optional: true,
Description: "find by tech status",
},
"status": schema.StringAttribute{
Optional: true,
Description: "find by status",
},
"front_ip": schema.StringAttribute{
Optional: true,
Description: "find by frontend Ip",
},
"back_ip": schema.StringAttribute{
Optional: true,
Description: "find by backend Ip",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "size number",
},

// computed attributes
"id": schema.StringAttribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"ha_mode": schema.BoolAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"backends": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"algorithm": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"server_default_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"inter": schema.Int64Attribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"down_inter": schema.Int64Attribute{
Computed: true,
},
"rise": schema.Int64Attribute{
Computed: true,
},
"fall": schema.Int64Attribute{
Computed: true,
},
"slow_start": schema.Int64Attribute{
Computed: true,
},
"max_conn": schema.Int64Attribute{
Computed: true,
},
"max_queue": schema.Int64Attribute{
Computed: true,
},
"weight": schema.Int64Attribute{
Computed: true,
},
},
},
"servers": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"address": schema.StringAttribute{
Computed: true,
},
"check": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"port": schema.Int64Attribute{
Computed: true,
},
"server_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"inter": schema.Int64Attribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"down_inter": schema.Int64Attribute{
Computed: true,
},
"rise": schema.Int64Attribute{
Computed: true,
},
"fall": schema.Int64Attribute{
Computed: true,
},
"slow_start": schema.Int64Attribute{
Computed: true,
},
"max_conn": schema.Int64Attribute{
Computed: true,
},
"max_queue": schema.Int64Attribute{
Computed: true,
},
"weight": schema.Int64Attribute{
Computed: true,
},
},
},
},
},
},
},
},
},
},
"created_by": schema.StringAttribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_by": schema.StringAttribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"dp_api_user": schema.StringAttribute{
Computed: true,
},
"extnet_id": schema.Int64Attribute{
Computed: true,
},
"frontends": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"backend": schema.StringAttribute{
Computed: true,
},
"bindings": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"address": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"port": schema.Int64Attribute{
Computed: true,
},
},
},
},
"guid": schema.StringAttribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
},
},
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"id": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"primary_node": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"backend_ip": schema.StringAttribute{
Computed: true,
},
"compute_id": schema.Int64Attribute{
Computed: true,
},
"frontend_ip": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"mgmt_ip": schema.StringAttribute{
Computed: true,
},
"network_id": schema.Int64Attribute{
Computed: true,
},
},
},
"rg_name": schema.StringAttribute{
Computed: true,
},
"secondary_node": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"backend_ip": schema.StringAttribute{
Computed: true,
},
"compute_id": schema.Int64Attribute{
Computed: true,
},
"frontend_ip": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"mgmt_ip": schema.StringAttribute{
Computed: true,
},
"network_id": schema.Int64Attribute{
Computed: true,
},
},
},
"status": schema.StringAttribute{
Computed: true,
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListPFW() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"public_port_end": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"public_port_start": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_port": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,117 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListVins() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by name",
|
||||
},
|
||||
"account_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by account id",
|
||||
},
|
||||
"ext_ip": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by external ip address",
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by vins id",
|
||||
},
|
||||
"sort_by": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "page number",
|
||||
},
|
||||
"size": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "size number",
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"account_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"computes": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"external_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extnet_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"free_ips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"network": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"pri_vnf_dev_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,143 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGResourceConsumptionList() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"consumed": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"reserved": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"resource_limits": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cu_c": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_dm": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGUsage() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "find by rg id",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"reason": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "reason for action",
|
||||
},
|
||||
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
)
|
||||
|
||||
func RGCheckPresence(ctx context.Context, rgId uint64, c *decort.DecortClient) (*rg.RecordRG, error) {
|
||||
tflog.Info(ctx, fmt.Sprintf("Get info about resource group with ID - %v", rgId))
|
||||
|
||||
recordRG, err := c.CloudBroker().RG().Get(ctx, rg.GetRequest{RGID: rgId})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "UtilityRGCheckPresence: response from CloudBroker().RG().Get", map[string]any{"rg_id": rgId, "response": recordRG})
|
||||
|
||||
return recordRG, err
|
||||
}
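Note (reviewer sketch, not part of this commit): the helper above returns the SDK record or a wrapped error; a data source typically converts that error into a Terraform diagnostic. A minimal sketch follows, assuming a hypothetical example package and that the utilities package is importable at .../cloudbroker/rg/utilities; the resolveRG name is illustrative only.

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
)

// resolveRG is a hypothetical caller: it resolves the resource group once and
// turns a lookup failure into a Terraform diagnostic instead of a raw error.
func resolveRG(ctx context.Context, rgID uint64, c *decort.DecortClient) diag.Diagnostics {
	var diags diag.Diagnostics
	recordRG, err := utilities.RGCheckPresence(ctx, rgID, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgID), err.Error())
		return diags
	}
	_ = recordRG // map the SDK record onto the data source state model here
	return diags
}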
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) (*rg.ListAffinityGroupCompute, error) {
|
||||
agCompsReq := rg.AffinityGroupComputesRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
AffinityGroup: plan.AffinityGroup.ValueString(),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: before call CloudBroker().RG().AffinityGroupComputes", map[string]any{"req": agCompsReq})
|
||||
agCompsList, err := c.CloudBroker().RG().AffinityGroupComputes(ctx, agCompsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity group computes with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: response from CloudBroker().RG().AffinityGroupComputes", map[string]any{"response": agCompsList})
|
||||
|
||||
return &agCompsList, err
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupsGetCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) ([]uint64, error) {
|
||||
agReq := rg.AffinityGroupsGetRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
AffinityGroup: plan.AffinityGroup.ValueString(),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: before call CloudBroker().RG().AffinityGroupsGet", map[string]any{"req": agReq})
|
||||
agItem, err := c.CloudBroker().RG().AffinityGroupsGet(ctx, agReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity groups get with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: response from CloudBroker().RG().AffinityGroupsGet", map[string]any{"response": agItem})
|
||||
|
||||
return agItem, err
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupsListCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) (*rg.ListAffinityGroup, error) {
|
||||
agListReq := rg.AffinityGroupsListRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
if !plan.Page.IsNull() {
|
||||
agListReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
agListReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: before call CloudBroker().RG().AffinityGroupsList", map[string]any{"req": agListReq})
|
||||
agList, err := c.CloudBroker().RG().AffinityGroupsList(ctx, agListReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity groups list with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: response from CloudBroker().RG().AffinityGroupsList", map[string]any{"response": agList})
|
||||
|
||||
return agList, err
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGAuditsCheckPresence(ctx context.Context, plan *models.DataSourceRGAuditsModel, c *decort.DecortClient) (*rg.ListAudits, error) {
auditsReq := rg.AuditsRequest{
RGID: uint64(plan.RGID.ValueInt64()),
}

tflog.Info(ctx, "RGAuditsCheckPresence: before call CloudBroker().RG().Audits", map[string]any{"req": auditsReq})
rgAudits, err := c.CloudBroker().RG().Audits(ctx, auditsReq)
if err != nil {
return nil, fmt.Errorf("cannot get info about resource group audits with error: %w", err)
}

tflog.Info(ctx, "RGAuditsCheckPresence: response from CloudBroker().RG().Audits", map[string]any{"response": rgAudits})

return &rgAudits, err
}
|
||||
@@ -0,0 +1,25 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGGetResourceConsumptionCheckPresence(ctx context.Context, plan *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) (*rg.ItemResourceConsumption, error) {
|
||||
resConsReq := rg.GetResourceConsumptionRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: before call CloudBroker().RG().GetResourceConsumption", map[string]any{"response": resConsReq})
|
||||
resCons, err := c.CloudBroker().RG().GetResourceConsumption(ctx, resConsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group get resource consumption with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: response from CloudBroker().RG().GetResourceConsumption", map[string]any{"response": resCons})
|
||||
|
||||
return resCons, err
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListCheckPresence(ctx context.Context, plan *models.DataSourceRGListModel, c *decort.DecortClient) (*rg.ListRG, error) {
|
||||
listReq := rg.ListRequest{}
|
||||
|
||||
if !plan.ById.IsNull() {
|
||||
listReq.ByID = uint64(plan.ById.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountId.IsNull() {
|
||||
listReq.AccountID = uint64(plan.AccountId.ValueInt64())
|
||||
}
|
||||
if !plan.AccountName.IsNull() {
|
||||
listReq.AccountName = plan.AccountName.ValueString()
|
||||
}
|
||||
if !plan.CreatedAfter.IsNull() {
|
||||
listReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64())
|
||||
}
|
||||
if !plan.CreatedBefore.IsNull() {
|
||||
listReq.CreatedBefore = uint64(plan.CreatedBefore.ValueInt64())
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.LockStatus.IsNull() {
|
||||
listReq.LockStatus = plan.LockStatus.ValueString()
|
||||
}
|
||||
if !plan.IncludeDeleted.IsNull() {
|
||||
listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool()
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListCheckPresence: before call CloudBroker().RG().List", map[string]any{"response": listReq})
|
||||
rgList, err := c.CloudBroker().RG().List(ctx, listReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListCheckPresence: response from CloudBroker().RG().List", map[string]any{"response": rgList})
|
||||
|
||||
return rgList, err
|
||||
}
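Note (reviewer sketch, not part of this commit): the sort_by filter forwarded above follows the +|-(field) convention, e.g. "+name" for ascending and "-created_time" for descending order, while page/size select a single page of results server-side. A minimal direct-SDK illustration, assuming "created_time" is one of the supported sort fields:

package example

import (
	"context"
	"fmt"

	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
)

// listRecentRGs is a hypothetical example: list resource groups newest first,
// fetching only the first page of 50 entries.
func listRecentRGs(ctx context.Context, c *decort.DecortClient) error {
	listReq := rg.ListRequest{
		SortBy: "-created_time", // format: +|-(field); "-" sorts descending
		Page:   1,
		Size:   50,
	}
	rgList, err := c.CloudBroker().RG().List(ctx, listReq)
	if err != nil {
		return fmt.Errorf("cannot get resource group list with error: %w", err)
	}
	fmt.Println(rgList)
	return nil
}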
|
||||
@@ -0,0 +1,61 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGListComputesModel, c *decort.DecortClient) (*rg.ListComputes, error) {
|
||||
listCompReq := rg.ListComputesRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.ComputeID.IsNull() {
|
||||
listCompReq.ComputeID = uint64(plan.ComputeID.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listCompReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountID.IsNull() {
|
||||
listCompReq.AccountID = uint64(plan.AccountID.ValueInt64())
|
||||
}
|
||||
if !plan.TechStatus.IsNull() {
|
||||
listCompReq.TechStatus = plan.TechStatus.ValueString()
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listCompReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.IPAddress.IsNull() {
|
||||
listCompReq.IPAddress = plan.IPAddress.ValueString()
|
||||
}
|
||||
if !plan.ExtNetName.IsNull() {
|
||||
listCompReq.ExtNetName = plan.ExtNetName.ValueString()
|
||||
}
|
||||
if !plan.ExtNetID.IsNull() {
|
||||
listCompReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listCompReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listCompReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listCompReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListComputesCheckPresence: before call CloudBroker().RG().ListComputes", map[string]any{"response": listCompReq})
|
||||
rgListComp, err := c.CloudBroker().RG().ListComputes(ctx, listCompReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list computes with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListComputesCheckPresence: response from CloudBroker().RG().ListComputes", map[string]any{"response": rgListComp})
|
||||
|
||||
return rgListComp, err
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceRGListDeletedModel, c *decort.DecortClient) (*rg.ListRG, error) {
|
||||
listDelReq := rg.ListDeletedRequest{}
|
||||
|
||||
if !plan.ById.IsNull() {
|
||||
listDelReq.ByID = uint64(plan.ById.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listDelReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountId.IsNull() {
|
||||
listDelReq.AccountID = uint64(plan.AccountId.ValueInt64())
|
||||
}
|
||||
if !plan.AccountName.IsNull() {
|
||||
listDelReq.AccountName = plan.AccountName.ValueString()
|
||||
}
|
||||
if !plan.CreatedAfter.IsNull() {
|
||||
listDelReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64())
|
||||
}
|
||||
if !plan.CreatedBefore.IsNull() {
|
||||
listDelReq.CreatedBefore = uint64(plan.CreatedBefore.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listDelReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.LockStatus.IsNull() {
|
||||
listDelReq.LockStatus = plan.LockStatus.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listDelReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listDelReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListDeletedCheckPresence: before call CloudBroker().RG().ListDeleted", map[string]any{"response": listDelReq})
|
||||
rgListDel, err := c.CloudBroker().RG().ListDeleted(ctx, listDelReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list deleted with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListDeletedCheckPresence: response from CloudBroker().RG().ListDeleted", map[string]any{"response": rgListDel})
|
||||
|
||||
return rgListDel, err
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListLBCheckPresence(ctx context.Context, plan *models.DataSourceRGListLBModel, c *decort.DecortClient) (*rg.ListLB, error) {
|
||||
listLBReq := rg.ListLBRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.ByID.IsNull() {
|
||||
listLBReq.ByID = uint64(plan.ByID.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listLBReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.TechStatus.IsNull() {
|
||||
listLBReq.TechStatus = plan.TechStatus.ValueString()
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listLBReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.FrontIP.IsNull() {
|
||||
listLBReq.FrontIP = plan.FrontIP.ValueString()
|
||||
}
|
||||
if !plan.BackIP.IsNull() {
|
||||
listLBReq.BackIP = plan.BackIP.ValueString()
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listLBReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listLBReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listLBReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListLBCheckPresence: before call CloudBroker().RG().ListLB", map[string]any{"response": listLBReq})
|
||||
rgListLB, err := c.CloudBroker().RG().ListLB(ctx, listLBReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list lb with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListLBCheckPresence: response from CloudBroker().RG().ListLB", map[string]any{"response": rgListLB})
|
||||
|
||||
return rgListLB, err
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListPFWCheckPresence(ctx context.Context, plan *models.DataSourceRGListPFWModel, c *decort.DecortClient) (*rg.ListPFW, error) {
|
||||
listPFWReq := rg.ListPFWRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListPFWCheckPresence: before call CloudBroker().RG().ListPFW", map[string]any{"response": listPFWReq})
|
||||
rgListPFW, err := c.CloudBroker().RG().ListPFW(ctx, listPFWReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list pfw with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListPFWCheckPresence: response from CloudBroker().RG().ListPFW", map[string]any{"response": rgListPFW})
|
||||
|
||||
return rgListPFW, err
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGListVinsCheckPresence(ctx context.Context, plan *models.DataSourceRGListVinsModel, c *decort.DecortClient) (*rg.ListVINS, error) {
|
||||
listVinsReq := rg.ListVINSRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.Name.IsNull() {
|
||||
listVinsReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountID.IsNull() {
|
||||
listVinsReq.AccountID = uint64(plan.AccountID.ValueInt64())
|
||||
}
|
||||
if !plan.ExtIP.IsNull() {
|
||||
listVinsReq.ExtIP = plan.ExtIP.ValueString()
|
||||
}
|
||||
if !plan.VINSID.IsNull() {
|
||||
listVinsReq.VINSID = uint64(plan.VINSID.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listVinsReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listVinsReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listVinsReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListVinsCheckPresence: before call CloudBroker().RG().ListVINS", map[string]any{"response": listVinsReq})
|
||||
rgListVins, err := c.CloudBroker().RG().ListVINS(ctx, listVinsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list vins with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListVinsCheckPresence: response from CloudBroker().RG().ListVINS", map[string]any{"response": rgListVins})
|
||||
|
||||
return rgListVins, err
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
)
|
||||
|
||||
func RGResourceConsumptionListCheckPresence(ctx context.Context, c *decort.DecortClient) (*rg.ListResourceConsumption, error) {
|
||||
tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: before call CloudBroker().RG().ListResourceConsumption")
|
||||
resConsList, err := c.CloudBroker().RG().ListResourceConsumption(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group resource consumption list with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: response from CloudBroker().RG().ListResourceConsumption", map[string]any{"response": resConsList})
|
||||
|
||||
return resConsList, err
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
|
||||
)
|
||||
|
||||
func RGUsageCheckPresence(ctx context.Context, plan *models.DataSourceRGUsageModel, c *decort.DecortClient) (*rg.Reservation, error) {
|
||||
usageReq := rg.UsageRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
if !plan.Reason.IsNull() {
|
||||
usageReq.Reason = plan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGUsageCheckPresence: before call CloudBroker().RG().Usage", map[string]any{"response": usageReq})
|
||||
usage, err := c.CloudBroker().RG().Usage(ctx, usageReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group resource usage with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGUsageCheckPresence: response from CloudBroker().RG().Usage", map[string]any{"response": usage})
|
||||
|
||||
return usage, err
|
||||
}
|
||||