1.0.0
This commit is contained in:
91
internal/service/cloudapi/rg/data_source_rg.go
Normal file
91
internal/service/cloudapi/rg/data_source_rg.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRG{}
)

// NewDataSourceRG returns a new resource-group data source.
func NewDataSourceRG() datasource.DataSource {
	return &dataSourceRG{}
}

// dataSourceRG is the data source implementation.
type dataSourceRG struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest resource-group data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRG) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error get state")
		return
	}
	rgID := uint64(state.RGID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceRG: got state successfully", map[string]any{"rg_id": rgID})

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRG: set timeouts successfully", map[string]any{
		"rg_id":       rgID,
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error flatten data source rg")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRG: Error set state")
		return
	}
	tflog.Info(ctx, "End read resource group", map[string]any{"rg_id": rgID})
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRG) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRG(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRG) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_resgroup"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRG) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRG")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRG successfully")
}
|
||||
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupComputes{}
)

// NewDataSourceRGAffinityGroupComputes returns a new data source listing
// computes of an affinity group inside a resource group.
func NewDataSourceRGAffinityGroupComputes() datasource.DataSource {
	return &dataSourceRGAffinityGroupComputes{}
}

// dataSourceRGAffinityGroupComputes is the data source implementation.
type dataSourceRGAffinityGroupComputes struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest affinity-group computes data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGAffinityGroupComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGAffinityGroupComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGAffinityGroupComputesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error flatten data source rg affinity group computes")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupComputes")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGAffinityGroupComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupComputes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGAffinityGroupComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_affinity_group_computes"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupComputes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupComputes successfully")
}
|
||||
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupsGet{}
)

// NewDataSourceRGAffinityGroupsGet returns a new data source fetching
// an affinity group of a resource group.
func NewDataSourceRGAffinityGroupsGet() datasource.DataSource {
	return &dataSourceRGAffinityGroupsGet{}
}

// dataSourceRGAffinityGroupsGet is the data source implementation.
type dataSourceRGAffinityGroupsGet struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest affinity-groups data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGAffinityGroupsGet) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGAffinityGroupsGetModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGAffinityGroupsGetDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		// Message aligned with the data source name (was "rg affinity group get").
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error flatten data source rg affinity groups get")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupsGet")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGAffinityGroupsGet) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsGet(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGAffinityGroupsGet) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_affinity_groups_get"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupsGet) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsGet")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsGet successfully")
}
|
||||
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAffinityGroupsList{}
)

// NewDataSourceRGAffinityGroupsList returns a new data source listing
// affinity groups of a resource group.
func NewDataSourceRGAffinityGroupsList() datasource.DataSource {
	return &dataSourceRGAffinityGroupsList{}
}

// dataSourceRGAffinityGroupsList is the data source implementation.
type dataSourceRGAffinityGroupsList struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest affinity-groups list data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGAffinityGroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGAffinityGroupsListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGAffinityGroupsListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		// Fixed copy-paste bug: message previously said "rg resource consumption list".
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error flatten data source rg affinity groups list")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAffinityGroupsList")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGAffinityGroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGAffinityGroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_affinity_groups_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAffinityGroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsList successfully")
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_audits.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_audits.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGAudits{}
)

// NewDataSourceRGAudits returns a new data source exposing audit records
// of a resource group.
func NewDataSourceRGAudits() datasource.DataSource {
	return &dataSourceRGAudits{}
}

// dataSourceRGAudits is the data source implementation.
type dataSourceRGAudits struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest audits data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGAuditsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAudits: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGAudits: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGAuditsDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error flatten data source rg audits")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGAudits: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGAudits")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGAudits(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_audits"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGAudits")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGAudits successfully")
}
|
||||
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGGetResourceConsumption{}
)

// NewDataSourceRGGetResourceConsumption returns a new data source exposing
// the resource consumption of a resource group.
func NewDataSourceRGGetResourceConsumption() datasource.DataSource {
	return &dataSourceRGGetResourceConsumption{}
}

// dataSourceRGGetResourceConsumption is the data source implementation.
type dataSourceRGGetResourceConsumption struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest resource-consumption data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGGetResourceConsumptionModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGGetResourceConsumptionDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error flatten data source rg get resource consumption")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGGetResourceConsumption")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGGetResourceConsumption(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_resource_consumption_get"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGGetResourceConsumption")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGGetResourceConsumption successfully")
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGList{}
)

// NewDataSourceRGList returns a new data source listing resource groups.
func NewDataSourceRGList() datasource.DataSource {
	return &dataSourceRGList{}
}

// dataSourceRGList is the data source implementation.
type dataSourceRGList struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest resource-group list data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGList: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error flatten data source rg list")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGList")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGList successfully")
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list_computes.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list_computes.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceRGListComputes{}
)

// NewDataSourceRGListComputes returns a new data source listing computes
// of a resource group.
func NewDataSourceRGListComputes() datasource.DataSource {
	return &dataSourceRGListComputes{}
}

// dataSourceRGListComputes is the data source implementation.
type dataSourceRGListComputes struct {
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest computes list data,
// honoring the configured (or default 30s) read timeout.
func (d *dataSourceRGListComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model.
	var state models.DataSourceRGListComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListComputes: got state successfully")

	// Set timeouts (falls back to the 30s default).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceRGListComputes: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.RGListComputesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		// Fixed copy-paste bug: message previously said "rg list".
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error flatten data source rg list computes")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceRGListComputes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceRGListComputes")
}

// Schema defines the data source schema, including the timeouts block.
func (d *dataSourceRGListComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceRGListComputes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the full data source type name.
func (d *dataSourceRGListComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_rg_list_computes"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceRGListComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceRGListComputes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceRGListComputes successfully")
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list_deleted.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list_deleted.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGListDeleted{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGListDeleted() datasource.DataSource {
|
||||
return &dataSourceRGListDeleted{}
|
||||
}
|
||||
|
||||
// dataSourceRGListDeleted is the data source implementation.
|
||||
type dataSourceRGListDeleted struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGListDeletedModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListDeleted: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListDeleted: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListDeleted: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGListDeletedDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListDeleted: Error flatten data source rg list")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGListDeleted")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGListDeleted(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_list_deleted"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGListDeleted")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGListDeleted successfully")
|
||||
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list_lb.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list_lb.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGListLB{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGListLB() datasource.DataSource {
|
||||
return &dataSourceRGListLB{}
|
||||
}
|
||||
|
||||
// dataSourceRGListLB is the data source implementation.
|
||||
type dataSourceRGListLB struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListLB) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGListLBModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListLB: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListLB: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListLB: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListLB: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGListLBDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListLB: Error flatten data source rg list lb")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListLB: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGListLB")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListLB) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGListLB(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListLB) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_list_lb"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGListLB) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGListLB")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGListLB successfully")
|
||||
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list_pfw.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list_pfw.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGListPFW{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGListPFW() datasource.DataSource {
|
||||
return &dataSourceRGListPFW{}
|
||||
}
|
||||
|
||||
// dataSourceRGListPFW is the data source implementation.
|
||||
type dataSourceRGListPFW struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListPFW) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGListPFWModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListPFW: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListPFW: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListPFW: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListPFW: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGListPFWDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListPFW: Error flatten data source rg list pfw")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListPFW: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGListPFW")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListPFW) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGListPFW(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListPFW) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_list_pfw"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGListPFW) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGListPFW")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGListPFW successfully")
|
||||
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_list_vins.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_list_vins.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGListVins{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGListVins() datasource.DataSource {
|
||||
return &dataSourceRGListVins{}
|
||||
}
|
||||
|
||||
// dataSourceRGListVins is the data source implementation.
|
||||
type dataSourceRGListVins struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListVins) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGListVinsModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListVins: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListVins: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListVins: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGListVins: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGListVinsDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListVins: Error flatten data source rg list")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGListVins: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGListVins")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListVins) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGListVins(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGListVins) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_list_vins"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGListVins) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGListVins")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGListVins successfully")
|
||||
}
|
||||
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGResourceConsumptionList{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGResourceConsumptionList() datasource.DataSource {
|
||||
return &dataSourceRGResourceConsumptionList{}
|
||||
}
|
||||
|
||||
// dataSourceRGResourceConsumptionList is the data source implementation.
|
||||
type dataSourceRGResourceConsumptionList struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGResourceConsumptionListModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGResourceConsumptionListDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error flatten data source rg resource consumption list")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGResourceConsumptionList")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGResourceConsumptionList(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_resource_consumption_list"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGResourceConsumptionList")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGResourceConsumptionList successfully")
|
||||
}
|
||||
89
internal/service/cloudapi/rg/data_source_rg_usage.go
Normal file
89
internal/service/cloudapi/rg/data_source_rg_usage.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &dataSourceRGUsage{}
|
||||
)
|
||||
|
||||
func NewDataSourceRGUsage() datasource.DataSource {
|
||||
return &dataSourceRGUsage{}
|
||||
}
|
||||
|
||||
// dataSourceRGUsage is the data source implementation.
|
||||
type dataSourceRGUsage struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
func (d *dataSourceRGUsage) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
|
||||
// Read Terraform configuration data into the model
|
||||
var state models.DataSourceRGUsageModel
|
||||
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGUsage: got state successfully")
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read dataSourceRGUsage: set timeouts successfully", map[string]any{
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Map response body to schema
|
||||
resp.Diagnostics.Append(flattens.RGUsageDataSource(ctx, &state, d.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error flatten data source rg usage")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read dataSourceRGUsage: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read dataSourceRGUsage")
|
||||
}
|
||||
|
||||
func (d *dataSourceRGUsage) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaDataSourceRGUsage(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataSourceRGUsage) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_rg_usage"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (d *dataSourceRGUsage) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
tflog.Info(ctx, "Get Configure dataSourceRGUsage")
|
||||
d.client = client.DataSource(ctx, &req, resp)
|
||||
tflog.Info(ctx, "Getting Configure dataSourceRGUsage successfully")
|
||||
}
|
||||
101
internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go
Normal file
101
internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGDataSource flattens data source for rg (resource group).
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgId := uint64(state.RGID.ValueInt64())
|
||||
|
||||
recordRG, err := utilities.RGCheckPresence(ctx, rgId, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgId), err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGDataSource: before flatten", map[string]any{"rg_id": rgId, "recordRG": recordRG})
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGModel{
|
||||
RGID: state.RGID,
|
||||
Reason: state.Reason,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
AccountID: types.Int64Value(int64(recordRG.AccountID)),
|
||||
GID: types.Int64Value(int64(recordRG.GID)),
|
||||
Name: types.StringValue(recordRG.Name),
|
||||
DefNetType: types.StringValue(recordRG.DefNetType),
|
||||
Description: types.StringValue(recordRG.Description),
|
||||
RegisterComputes: types.BoolValue(recordRG.RegisterComputes),
|
||||
AccountName: types.StringValue(recordRG.AccountName),
|
||||
ACL: flattenACL(ctx, &recordRG.ACL),
|
||||
CPUAllocationParameter: types.StringValue(recordRG.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(recordRG.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(recordRG.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(recordRG.CreatedTime)),
|
||||
DefNetID: types.Int64Value(recordRG.DefNetID),
|
||||
DeletedBy: types.StringValue(recordRG.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(recordRG.DeletedTime)),
|
||||
Dirty: types.BoolValue(recordRG.Dirty),
|
||||
GUID: types.Int64Value(int64(recordRG.GUID)),
|
||||
LockStatus: types.StringValue(recordRG.LockStatus),
|
||||
Milestones: types.Int64Value(int64(recordRG.Milestones)),
|
||||
Secret: types.StringValue(recordRG.Secret),
|
||||
Status: types.StringValue(recordRG.Status),
|
||||
UpdatedBy: types.StringValue(recordRG.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(recordRG.UpdatedTime)),
|
||||
ResourceLimits: &models.ResourceLimitsModel{
|
||||
CUC: types.Float64Value(recordRG.ResourceLimits.CUC),
|
||||
CUD: types.Float64Value(recordRG.ResourceLimits.CUD),
|
||||
CUDM: types.Float64Value(recordRG.ResourceLimits.CUDM),
|
||||
CUI: types.Float64Value(recordRG.ResourceLimits.CUI),
|
||||
CUM: types.Float64Value(recordRG.ResourceLimits.CUM),
|
||||
CUNP: types.Float64Value(recordRG.ResourceLimits.CUNP),
|
||||
GPUUnits: types.Float64Value(recordRG.ResourceLimits.GPUUnits),
|
||||
},
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
state.UniqPools, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.UniqPools)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGDataSource: cannot flatten recordRG.UniqPools to state.UniqPools", diags))
|
||||
}
|
||||
state.VINS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, recordRG.VINS)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGDataSource: cannot flatten recordRG.VINS to state.VINS", diags))
|
||||
}
|
||||
state.ResTypes, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.ResTypes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGDataSource: cannot flatten recordRG.ResTypes to state.ResTypes", diags))
|
||||
}
|
||||
state.Computes, diagsItem = types.ListValueFrom(ctx, types.Int64Type, recordRG.Computes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGDataSource: cannot flatten recordRG.Computes to state.Computes", diags))
|
||||
}
|
||||
state.ComputeFeatures, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.ComputeFeatures)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGDataSource: cannot flatten recordRG.ComputeFeatures to state.ComputeFeatures", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGDataSource: after flatten", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGDataSource", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupComputesDataSource flattens data source for rg affinity group computes.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupComputesDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupComputesDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agCompsList, err := utilities.RGAffinityGroupComputesCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity group computes", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupComputesModel{
|
||||
RGID: state.RGID,
|
||||
AffinityGroup: state.AffinityGroup,
|
||||
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
items := make([]models.ItemAffinityGroupComputeModel, 0, len(*agCompsList))
|
||||
for _, comp := range *agCompsList {
|
||||
item := models.ItemAffinityGroupComputeModel{
|
||||
ComputeID: types.Int64Value(int64(comp.ComputeID)),
|
||||
}
|
||||
|
||||
item.OtherNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNode)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNode to item.OtherNode", diags))
|
||||
}
|
||||
item.OtherNodeIndirect, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirect)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirect to item.OtherNodeIndirect", diags))
|
||||
}
|
||||
item.OtherNodeIndirectSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirectSoft)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirectSoft to item.OtherNodeIndirectSoft", diags))
|
||||
}
|
||||
item.OtherNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeSoft)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeSoft to item.OtherNodeSoft", diags))
|
||||
}
|
||||
item.SameNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNode)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNode to item.SameNode", diags))
|
||||
}
|
||||
item.SameNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNodeSoft)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNodeSoft to item.SameNodeSoft", diags))
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupComputesDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupsGetDataSource flattens data source for rg affinity groups get.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupsGetDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupsGetDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agItem, err := utilities.RGAffinityGroupsGetCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity groups get", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupsGetModel{
|
||||
RGID: state.RGID,
|
||||
AffinityGroup: state.AffinityGroup,
|
||||
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
state.Ids, diagsItem = types.ListValueFrom(ctx, types.Int64Type, agItem)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupsGetDataSource: cannot flatten agItem to state.Ids", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupsGetDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAffinityGroupsListDataSource flattens data source for rg affinity groups list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAffinityGroupsListDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAffinityGroupsListDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
agList, err := utilities.RGAffinityGroupsListCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group affinity groups list", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAffinityGroupsListModel{
|
||||
RGID: state.RGID,
|
||||
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemAffinityGroupModel, 0, len(agList.Data))
|
||||
for _, data := range agList.Data {
|
||||
for agLabel, listAG := range data {
|
||||
item := models.ItemAffinityGroupModel{
|
||||
Label: types.StringValue(agLabel),
|
||||
}
|
||||
|
||||
ids := make([]models.ItemIDModel, 0, len(listAG))
|
||||
for _, agItem := range listAG {
|
||||
idItem := models.ItemIDModel{
|
||||
Id: types.Int64Value(int64(agItem.ID)),
|
||||
NodeId: types.Int64Value(int64(agItem.NodeID)),
|
||||
}
|
||||
ids = append(ids, idItem)
|
||||
}
|
||||
item.Ids = ids
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
state.AffinityGroups = items
|
||||
state.EntryCount = types.Int64Value(int64(agList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAffinityGroupsListDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGAuditsDataSource flattens data source for rg audits.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGAuditsDataSource(ctx context.Context, state *models.DataSourceRGAuditsModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGAuditsDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgAudits, err := utilities.RGAuditsCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group audits", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAuditsDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGAuditsModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGAuditModel, 0, len(*rgAudits))
|
||||
for _, auditItem := range *rgAudits {
|
||||
item := models.ItemsRGAuditModel{
|
||||
Call: types.StringValue(auditItem.Call),
|
||||
ResponseTime: types.Float64Value(auditItem.ResponseTime),
|
||||
StatusCode: types.Int64Value(int64(auditItem.StatusCode)),
|
||||
Timestamp: types.Float64Value(auditItem.Timestamp),
|
||||
User: types.StringValue(auditItem.User),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
state.Items = items
|
||||
|
||||
tflog.Info(ctx, "flattens.RGAuditsDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGAuditsDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,98 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGGetResourceConsumptionDataSource flattens data source for rg get resource consumption.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGGetResourceConsumptionDataSource(ctx context.Context, state *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGGetResourceConsumptionDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
resourceItem, err := utilities.RGGetResourceConsumptionCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group get resource consumption", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGGetResourceConsumptionModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
|
||||
Consumed: &models.ResourceModel{
|
||||
CPU: types.Int64Value(resourceItem.Consumed.CPU),
|
||||
DiskSize: types.Float64Value(resourceItem.Consumed.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(resourceItem.Consumed.DiskSizeMax),
|
||||
ExtIPs: types.Int64Value(resourceItem.Consumed.ExtIPs),
|
||||
ExtTraffic: types.Int64Value(resourceItem.Consumed.ExtTraffic),
|
||||
GPU: types.Int64Value(resourceItem.Consumed.GPU),
|
||||
RAM: types.Int64Value(resourceItem.Consumed.RAM),
|
||||
},
|
||||
Reserved: &models.ResourceModel{
|
||||
CPU: types.Int64Value(resourceItem.Reserved.CPU),
|
||||
DiskSize: types.Float64Value(resourceItem.Reserved.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(resourceItem.Reserved.DiskSizeMax),
|
||||
ExtIPs: types.Int64Value(resourceItem.Reserved.ExtIPs),
|
||||
ExtTraffic: types.Int64Value(resourceItem.Reserved.ExtTraffic),
|
||||
GPU: types.Int64Value(resourceItem.Reserved.GPU),
|
||||
RAM: types.Int64Value(resourceItem.Reserved.RAM),
|
||||
},
|
||||
ResourceLimits: &models.ResourceLimitsModel{
|
||||
CUC: types.Float64Value(resourceItem.ResourceLimits.CUC),
|
||||
CUD: types.Float64Value(resourceItem.ResourceLimits.CUD),
|
||||
CUDM: types.Float64Value(resourceItem.ResourceLimits.CUDM),
|
||||
CUI: types.Float64Value(resourceItem.ResourceLimits.CUI),
|
||||
CUM: types.Float64Value(resourceItem.ResourceLimits.CUM),
|
||||
CUNP: types.Float64Value(resourceItem.ResourceLimits.CUNP),
|
||||
GPUUnits: types.Float64Value(resourceItem.ResourceLimits.GPUUnits),
|
||||
},
|
||||
}
|
||||
|
||||
sepsConsumed := make([]models.SEPsModel, 0, len(resourceItem.Consumed.SEPs))
|
||||
for sepId, data := range resourceItem.Consumed.SEPs {
|
||||
for dataName, diskData := range data {
|
||||
sepItem := models.SEPsModel{
|
||||
SepID: types.StringValue(sepId),
|
||||
DataName: types.StringValue(dataName),
|
||||
DiskSize: types.Float64Value(diskData.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
|
||||
}
|
||||
sepsConsumed = append(sepsConsumed, sepItem)
|
||||
}
|
||||
}
|
||||
state.Consumed.SEPs = sepsConsumed
|
||||
|
||||
sepsReserved := make([]models.SEPsModel, 0, len(resourceItem.Reserved.SEPs))
|
||||
for sepId, data := range resourceItem.Reserved.SEPs {
|
||||
for dataName, diskData := range data {
|
||||
sepItem := models.SEPsModel{
|
||||
SepID: types.StringValue(sepId),
|
||||
DataName: types.StringValue(dataName),
|
||||
DiskSize: types.Float64Value(diskData.DiskSize),
|
||||
DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
|
||||
}
|
||||
sepsReserved = append(sepsReserved, sepItem)
|
||||
}
|
||||
}
|
||||
state.Reserved.SEPs = sepsReserved
|
||||
|
||||
tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGGetResourceConsumptionDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListDataSource flattens data source for rg list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListDataSource(ctx context.Context, state *models.DataSourceRGListModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgList, err := utilities.RGListCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListModel{
|
||||
ById: state.ById,
|
||||
Name: state.Name,
|
||||
AccountId: state.AccountId,
|
||||
AccountName: state.AccountName,
|
||||
CreatedAfter: state.CreatedAfter,
|
||||
CreatedBefore: state.CreatedBefore,
|
||||
Status: state.Status,
|
||||
LockStatus: state.LockStatus,
|
||||
IncludeDeleted: state.IncludeDeleted,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListModel, 0, len(rgList.Data))
|
||||
for _, rgItem := range rgList.Data {
|
||||
item := models.ItemsRGListModel{
|
||||
AccountACL: flattenACL(ctx, &rgItem.ACL),
|
||||
AccountID: types.Int64Value(int64(rgItem.AccountID)),
|
||||
AccountName: types.StringValue(rgItem.AccountName),
|
||||
CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(rgItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)),
|
||||
DefNetID: types.Int64Value(rgItem.DefNetID),
|
||||
DefNetType: types.StringValue(rgItem.DefNetType),
|
||||
DeletedBy: types.StringValue(rgItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)),
|
||||
Description: types.StringValue(rgItem.Description),
|
||||
Dirty: types.BoolValue(rgItem.Dirty),
|
||||
GID: types.Int64Value(int64(rgItem.GID)),
|
||||
GUID: types.Int64Value(int64(rgItem.GUID)),
|
||||
RGID: types.Int64Value(int64(rgItem.ID)),
|
||||
LockStatus: types.StringValue(rgItem.LockStatus),
|
||||
Milestones: types.Int64Value(int64(rgItem.Milestones)),
|
||||
Name: types.StringValue(rgItem.Name),
|
||||
RegisterComputes: types.BoolValue(rgItem.RegisterComputes),
|
||||
ResourceLimits: &models.ResourceLimitsModel{
|
||||
CUC: types.Float64Value(rgItem.ResourceLimits.CUC),
|
||||
CUD: types.Float64Value(rgItem.ResourceLimits.CUD),
|
||||
CUDM: types.Float64Value(rgItem.ResourceLimits.CUDM),
|
||||
CUI: types.Float64Value(rgItem.ResourceLimits.CUI),
|
||||
CUM: types.Float64Value(rgItem.ResourceLimits.CUM),
|
||||
CUNP: types.Float64Value(rgItem.ResourceLimits.CUNP),
|
||||
GPUUnits: types.Float64Value(rgItem.ResourceLimits.GPUUnits),
|
||||
},
|
||||
Secret: types.StringValue(rgItem.Secret),
|
||||
Status: types.StringValue(rgItem.Status),
|
||||
UpdatedBy: types.StringValue(rgItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
item.UniqPools, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.UniqPools)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDataSource: cannot flatten rgItem.UniqPools to item.UniqPools", diags))
|
||||
}
|
||||
item.VINS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, rgItem.VINS)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDataSource: cannot flatten rgItem.VINS to item.VINS", diags))
|
||||
}
|
||||
item.ResTypes, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.ResTypes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDataSource: cannot flatten rgItem.ResTypes to item.ResTypes", diags))
|
||||
}
|
||||
item.VMS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, rgItem.Computes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDataSource: cannot flatten rgItem.Computes to item.Computes", diags))
|
||||
}
|
||||
item.ComputeFeatures, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.ComputeFeatures)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDataSource: cannot flatten rgItem.ComputeFeatures to item.ComputeFeatures", diags))
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListComputesDataSource flattens data source for rg list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListComputesDataSource(ctx context.Context, state *models.DataSourceRGListComputesModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListComputesDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListComputes, err := utilities.RGListComputesCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list computes", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListComputesDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListComputesModel{
|
||||
RGID: state.RGID,
|
||||
ComputeID: state.ComputeID,
|
||||
Name: state.Name,
|
||||
AccountID: state.AccountID,
|
||||
TechStatus: state.TechStatus,
|
||||
Status: state.Status,
|
||||
IPAddress: state.IPAddress,
|
||||
ExtNetName: state.ExtNetName,
|
||||
ExtNetID: state.ExtNetID,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListComputeModel, 0, len(rgListComputes.Data))
|
||||
for _, compItem := range rgListComputes.Data {
|
||||
item := models.ItemsRGListComputeModel{
|
||||
AccountID: types.Int64Value(int64(compItem.AccountID)),
|
||||
AccountName: types.StringValue(compItem.AccountName),
|
||||
AffinityLabel: types.StringValue(compItem.AffinityLabel),
|
||||
AffinityRules: flattenAffinityRules(ctx, &compItem.AffinityRules),
|
||||
AffinityWeight: types.Int64Value(int64(compItem.AffinityWeight)),
|
||||
AntiAffinityRules: flattenAffinityRules(ctx, &compItem.AntiAffinityRules),
|
||||
CPUs: types.Int64Value(int64(compItem.CPUs)),
|
||||
CreatedBy: types.StringValue(compItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(compItem.CreatedTime)),
|
||||
DeletedBy: types.StringValue(compItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(compItem.DeletedTime)),
|
||||
ID: types.Int64Value(int64(compItem.ID)),
|
||||
Name: types.StringValue(compItem.Name),
|
||||
RAM: types.Int64Value(int64(compItem.RAM)),
|
||||
Registered: types.BoolValue(compItem.Registered),
|
||||
RGName: types.StringValue(compItem.RGName),
|
||||
Status: types.StringValue(compItem.Status),
|
||||
TechStatus: types.StringValue(compItem.TechStatus),
|
||||
TotalDisksSize: types.Int64Value(int64(compItem.TotalDisksSize)),
|
||||
UpdatedBy: types.StringValue(compItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(compItem.UpdatedTime)),
|
||||
UserManaged: types.BoolValue(compItem.UserManaged),
|
||||
VINSConnected: types.Int64Value(int64(compItem.VINSConnected)),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListComputes.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListComputesDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListComputesDataSource")
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenAffinityRules(ctx context.Context, items *rg.ListRules) types.List {
|
||||
tflog.Info(ctx, "Start flattenAffinityRules")
|
||||
tempSlice := make([]types.Object, 0, len(*items))
|
||||
for _, ruleItem := range *items {
|
||||
temp := models.AffinityRuleModel{
|
||||
GUID: types.StringValue(ruleItem.GUID),
|
||||
Key: types.StringValue(ruleItem.Key),
|
||||
Mode: types.StringValue(ruleItem.Mode),
|
||||
Policy: types.StringValue(ruleItem.Policy),
|
||||
Topology: types.StringValue(ruleItem.Topology),
|
||||
Value: types.StringValue(ruleItem.Value),
|
||||
}
|
||||
|
||||
obj, diags := types.ObjectValueFrom(ctx, models.ItemAffinityRule, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenAffinityRules struct to obj", diags))
|
||||
}
|
||||
tempSlice = append(tempSlice, obj)
|
||||
}
|
||||
|
||||
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemAffinityRule}, tempSlice)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenAffinityRules", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenAffinityRules")
|
||||
return res
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListDeletedDataSource flattens data source for rg list deleted.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListDeletedDataSource(ctx context.Context, state *models.DataSourceRGListDeletedModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListDeletedDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgList, err := utilities.RGListDeletedCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list deleted", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDeletedDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListDeletedModel{
|
||||
ById: state.ById,
|
||||
Name: state.Name,
|
||||
AccountId: state.AccountId,
|
||||
AccountName: state.AccountName,
|
||||
CreatedAfter: state.CreatedAfter,
|
||||
CreatedBefore: state.CreatedBefore,
|
||||
LockStatus: state.LockStatus,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListDeletedModel, 0, len(rgList.Data))
|
||||
for _, rgItem := range rgList.Data {
|
||||
item := models.ItemsRGListDeletedModel{
|
||||
AccountACL: flattenACL(ctx, &rgItem.ACL),
|
||||
AccountID: types.Int64Value(int64(rgItem.AccountID)),
|
||||
AccountName: types.StringValue(rgItem.AccountName),
|
||||
CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio),
|
||||
CreatedBy: types.StringValue(rgItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)),
|
||||
DefNetID: types.Int64Value(rgItem.DefNetID),
|
||||
DefNetType: types.StringValue(rgItem.DefNetType),
|
||||
DeletedBy: types.StringValue(rgItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)),
|
||||
Description: types.StringValue(rgItem.Description),
|
||||
Dirty: types.BoolValue(rgItem.Dirty),
|
||||
GID: types.Int64Value(int64(rgItem.GID)),
|
||||
GUID: types.Int64Value(int64(rgItem.GUID)),
|
||||
RGID: types.Int64Value(int64(rgItem.ID)),
|
||||
LockStatus: types.StringValue(rgItem.LockStatus),
|
||||
Milestones: types.Int64Value(int64(rgItem.Milestones)),
|
||||
Name: types.StringValue(rgItem.Name),
|
||||
RegisterComputes: types.BoolValue(rgItem.RegisterComputes),
|
||||
ResourceLimits: &models.ResourceLimitsModel{
|
||||
CUC: types.Float64Value(rgItem.ResourceLimits.CUC),
|
||||
CUD: types.Float64Value(rgItem.ResourceLimits.CUD),
|
||||
CUDM: types.Float64Value(rgItem.ResourceLimits.CUDM),
|
||||
CUI: types.Float64Value(rgItem.ResourceLimits.CUI),
|
||||
CUM: types.Float64Value(rgItem.ResourceLimits.CUM),
|
||||
CUNP: types.Float64Value(rgItem.ResourceLimits.CUNP),
|
||||
GPUUnits: types.Float64Value(rgItem.ResourceLimits.GPUUnits),
|
||||
},
|
||||
Secret: types.StringValue(rgItem.Secret),
|
||||
Status: types.StringValue(rgItem.Status),
|
||||
UpdatedBy: types.StringValue(rgItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
item.UniqPools, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.UniqPools)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDeletedDataSource: cannot flatten rgItem.UniqPools to item.UniqPools", diags))
|
||||
}
|
||||
item.VINS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, rgItem.VINS)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDeletedDataSource: cannot flatten rgItem.VINS to item.VINS", diags))
|
||||
}
|
||||
item.ResTypes, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.ResTypes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDeletedDataSource: cannot flatten rgItem.ResTypes to item.ResTypes", diags))
|
||||
}
|
||||
item.VMS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, rgItem.Computes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDeletedDataSource: cannot flatten rgItem.Computes to item.Computes", diags))
|
||||
}
|
||||
item.ComputeFeatures, diagsItem = types.ListValueFrom(ctx, types.StringType, rgItem.ComputeFeatures)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGListDeletedDataSource: cannot flatten rgItem.ComputeFeatures to item.ComputeFeatures", diags))
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgList.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListDeletedDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListDeletedDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,172 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListLBDataSource flattens data source for rg list lb.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
// RGListLBDataSource flattens the platform response for the "rg list lb" data
// source into Terraform state. The lookup error is fatal; everything else is a
// field-by-field mapping of load balancers, their backends (with servers) and
// frontends (with bindings).
func RGListLBDataSource(ctx context.Context, state *models.DataSourceRGListLBModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.RGListLBDataSource")

	diags := diag.Diagnostics{}

	// Fetch the LB list for the resource group from the platform.
	rgListLB, err := utilities.RGListLBCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about resource group list lb", err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.RGListLBDataSource: before flatten")

	// Rebuild state from scratch: keep the user-supplied request/filter fields
	// and assign a fresh random UUID as the Terraform data-source id.
	id := uuid.New()
	*state = models.DataSourceRGListLBModel{
		RGID:       state.RGID,
		ByID:       state.ByID,
		Name:       state.Name,
		TechStatus: state.TechStatus,
		Status:     state.Status,
		FrontIP:    state.FrontIP,
		BackIP:     state.BackIP,
		SortBy:     state.SortBy,
		Page:       state.Page,
		Size:       state.Size,
		Timeouts:   state.Timeouts,

		Id: types.StringValue(id.String()),
	}

	// Flatten every load balancer returned for this RG.
	items := make([]models.ItemsRGListLBModel, 0, len(rgListLB.Data))
	for _, lbItem := range rgListLB.Data {
		item := models.ItemsRGListLBModel{
			HAMode: types.BoolValue(lbItem.HAMode),
			// ACL: flattenACL(ctx, lbItem.ACL), not added because platform returns interface{}
			BackendHAIP:  types.StringValue(lbItem.BackendHAIP),
			CreatedBy:    types.StringValue(lbItem.CreatedBy),
			CreatedTime:  types.Int64Value(int64(lbItem.CreatedTime)),
			DeletedBy:    types.StringValue(lbItem.DeletedBy),
			DeletedTime:  types.Int64Value(int64(lbItem.DeletedTime)),
			Description:  types.StringValue(lbItem.Description),
			DPAPIUser:    types.StringValue(lbItem.DPAPIUser),
			ExtNetID:     types.Int64Value(int64(lbItem.ExtNetID)),
			FrontendHAIP: types.StringValue(lbItem.FrontendHAIP),
			GID:          types.Int64Value(int64(lbItem.GID)),
			GUID:         types.Int64Value(int64(lbItem.GUID)),
			ID:           types.Int64Value(int64(lbItem.ID)),
			ImageID:      types.Int64Value(int64(lbItem.ImageID)),
			Milestones:   types.Int64Value(int64(lbItem.Milestones)),
			Name:         types.StringValue(lbItem.Name),
			// Primary/secondary HA nodes are flattened inline.
			PrimaryNode: models.RecordNodeModel{
				BackendIP:  types.StringValue(lbItem.PrimaryNode.BackendIP),
				ComputeID:  types.Int64Value(int64(lbItem.PrimaryNode.ComputeID)),
				FrontendIP: types.StringValue(lbItem.PrimaryNode.FrontendIP),
				GUID:       types.StringValue(lbItem.PrimaryNode.GUID),
				MGMTIP:     types.StringValue(lbItem.PrimaryNode.MGMTIP),
				NetworkID:  types.Int64Value(int64(lbItem.PrimaryNode.NetworkID)),
			},
			RGName: types.StringValue(lbItem.RGName),
			SecondaryNode: models.RecordNodeModel{
				BackendIP:  types.StringValue(lbItem.SecondaryNode.BackendIP),
				ComputeID:  types.Int64Value(int64(lbItem.SecondaryNode.ComputeID)),
				FrontendIP: types.StringValue(lbItem.SecondaryNode.FrontendIP),
				GUID:       types.StringValue(lbItem.SecondaryNode.GUID),
				MGMTIP:     types.StringValue(lbItem.SecondaryNode.MGMTIP),
				NetworkID:  types.Int64Value(int64(lbItem.SecondaryNode.NetworkID)),
			},
			Status:      types.StringValue(lbItem.Status),
			TechStatus:  types.StringValue(lbItem.TechStatus),
			UpdatedBy:   types.StringValue(lbItem.UpdatedBy),
			UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)),
			VINSID:      types.Int64Value(int64(lbItem.VINSID)),
		}

		// flatten Backends
		backends := make([]models.ItemBackendModel, 0, len(lbItem.Backends))
		for _, backendItem := range lbItem.Backends {
			be := models.ItemBackendModel{
				Algorithm: types.StringValue(backendItem.Algorithm),
				GUID:      types.StringValue(backendItem.GUID),
				Name:      types.StringValue(backendItem.Name),
				ServerDefaultSettings: models.RecordServerSettingsModel{
					Inter:     types.Int64Value(int64(backendItem.ServerDefaultSettings.Inter)),
					GUID:      types.StringValue(backendItem.ServerDefaultSettings.GUID),
					DownInter: types.Int64Value(int64(backendItem.ServerDefaultSettings.DownInter)),
					Rise:      types.Int64Value(int64(backendItem.ServerDefaultSettings.Rise)),
					Fall:      types.Int64Value(int64(backendItem.ServerDefaultSettings.Fall)),
					SlowStart: types.Int64Value(int64(backendItem.ServerDefaultSettings.SlowStart)),
					MaxConn:   types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxConn)),
					MaxQueue:  types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxQueue)),
					Weight:    types.Int64Value(int64(backendItem.ServerDefaultSettings.Weight)),
				},
			}

			// Each backend carries its own list of servers with per-server settings.
			servers := make([]models.ItemServerModel, 0, len(backendItem.Servers))
			for _, server := range backendItem.Servers {
				s := models.ItemServerModel{
					Address: types.StringValue(server.Address),
					Check:   types.StringValue(server.Check),
					GUID:    types.StringValue(server.GUID),
					Name:    types.StringValue(server.Name),
					Port:    types.Int64Value(int64(server.Port)),
					ServerSettings: models.RecordServerSettingsModel{
						Inter:     types.Int64Value(int64(server.ServerSettings.Inter)),
						GUID:      types.StringValue(server.ServerSettings.GUID),
						DownInter: types.Int64Value(int64(server.ServerSettings.DownInter)),
						Rise:      types.Int64Value(int64(server.ServerSettings.Rise)),
						Fall:      types.Int64Value(int64(server.ServerSettings.Fall)),
						SlowStart: types.Int64Value(int64(server.ServerSettings.SlowStart)),
						MaxConn:   types.Int64Value(int64(server.ServerSettings.MaxConn)),
						MaxQueue:  types.Int64Value(int64(server.ServerSettings.MaxQueue)),
						Weight:    types.Int64Value(int64(server.ServerSettings.Weight)),
					},
				}
				servers = append(servers, s)
			}
			be.Servers = servers
			backends = append(backends, be)
		}
		item.Backends = backends

		// flatten Frontends
		frontends := make([]models.ItemFrontendModel, 0, len(lbItem.Frontends))
		for _, frontendItem := range lbItem.Frontends {
			fr := models.ItemFrontendModel{
				Backend: types.StringValue(frontendItem.Backend),
				GUID:    types.StringValue(frontendItem.GUID),
				Name:    types.StringValue(frontendItem.Name),
			}
			// Each frontend has a list of address/port bindings.
			bindings := make([]models.ItemBindingModel, 0, len(frontendItem.Bindings))
			for _, bingingItem := range frontendItem.Bindings {
				b := models.ItemBindingModel{
					Address: types.StringValue(bingingItem.Address),
					GUID:    types.StringValue(bingingItem.GUID),
					Name:    types.StringValue(bingingItem.Name),
					Port:    types.Int64Value(int64(bingingItem.Port)),
				}
				bindings = append(bindings, b)
			}
			fr.Bindings = bindings
			frontends = append(frontends, fr)
		}
		item.Frontends = frontends

		items = append(items, item)
	}

	state.Items = items
	state.EntryCount = types.Int64Value(int64(rgListLB.EntryCount))

	tflog.Info(ctx, "flattens.RGListLBDataSource: after flatten")

	tflog.Info(ctx, "End flattens.RGListLBDataSource")
	// No diagnostics were accumulated on the success path.
	return nil
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListPFWDataSource flattens data source for rg list pfw.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListPFWDataSource(ctx context.Context, state *models.DataSourceRGListPFWModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListPFWDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListPFW, err := utilities.RGListPFWCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list pfw", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListPFWDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListPFWModel{
|
||||
RGID: state.RGID,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListPFWModel, 0, len(rgListPFW.Data))
|
||||
for _, pfwItem := range rgListPFW.Data {
|
||||
item := models.ItemsRGListPFWModel{
|
||||
PublicPortEnd: types.Int64Value(int64(pfwItem.PublicPortEnd)),
|
||||
PublicPortStart: types.Int64Value(int64(pfwItem.PublicPortStart)),
|
||||
VMID: types.Int64Value(int64(pfwItem.VMID)),
|
||||
VMIP: types.StringValue(pfwItem.VMIP),
|
||||
VMName: types.StringValue(pfwItem.VMName),
|
||||
VMPort: types.Int64Value(int64(pfwItem.VMPort)),
|
||||
VINSID: types.Int64Value(int64(pfwItem.VINSID)),
|
||||
VINSName: types.StringValue(pfwItem.VINSName),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListPFW.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListPFWDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListPFWDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,79 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGListVinsDataSource flattens data source for rg list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGListVinsDataSource(ctx context.Context, state *models.DataSourceRGListVinsModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGListVinsDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgListVins, err := utilities.RGListVinsCheckPresence(ctx, state, c)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot get info about resource group list vins", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListVinsDataSource: before flatten")
|
||||
|
||||
id := uuid.New()
|
||||
*state = models.DataSourceRGListVinsModel{
|
||||
RGID: state.RGID,
|
||||
Name: state.Name,
|
||||
AccountID: state.AccountID,
|
||||
ExtIP: state.ExtIP,
|
||||
VINSID: state.VINSID,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
Id: types.StringValue(id.String()),
|
||||
}
|
||||
|
||||
items := make([]models.ItemsRGListVinsModel, 0, len(rgListVins.Data))
|
||||
for _, vinsItem := range rgListVins.Data {
|
||||
item := models.ItemsRGListVinsModel{
|
||||
AccountID: types.Int64Value(int64(vinsItem.AccountID)),
|
||||
AccountName: types.StringValue(vinsItem.AccountName),
|
||||
Computes: types.Int64Value(int64(vinsItem.Computes)),
|
||||
CreatedBy: types.StringValue(vinsItem.CreatedBy),
|
||||
CreatedTime: types.Int64Value(int64(vinsItem.CreatedTime)),
|
||||
DeletedBy: types.StringValue(vinsItem.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(vinsItem.DeletedTime)),
|
||||
ExternalIP: types.StringValue(vinsItem.ExternalIP),
|
||||
ExtnetID: types.Int64Value(int64(vinsItem.ExtnetId)),
|
||||
FreeIPs: types.Int64Value(int64(vinsItem.FreeIPs)),
|
||||
ID: types.Int64Value(int64(vinsItem.ID)),
|
||||
Name: types.StringValue(vinsItem.Name),
|
||||
Network: types.StringValue(vinsItem.Network),
|
||||
PriVNFDevID: types.Int64Value(int64(vinsItem.PriVNFDevID)),
|
||||
RGName: types.StringValue(vinsItem.RGName),
|
||||
Status: types.StringValue(vinsItem.Status),
|
||||
UpdatedBy: types.StringValue(vinsItem.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(vinsItem.UpdatedTime)),
|
||||
}
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
state.Items = items
|
||||
state.EntryCount = types.Int64Value(int64(rgListVins.EntryCount))
|
||||
|
||||
tflog.Info(ctx, "flattens.RGListVinsDataSource: after flatten")
|
||||
|
||||
tflog.Info(ctx, "End flattens.RGListVinsDataSource")
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,108 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGResourceConsumptionListDataSource flattens data source for rg resource consumption list.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
// RGResourceConsumptionListDataSource flattens the "rg resource consumption
// list" response into Terraform state: for each RG it maps the consumed,
// reserved and limit figures, plus the per-SEP disk breakdown.
func RGResourceConsumptionListDataSource(ctx context.Context, state *models.DataSourceRGResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.RGResourceConsumptionListDataSource")

	diags := diag.Diagnostics{}

	// Fetch the consumption list; a lookup failure is fatal for the data source.
	resConsList, err := utilities.RGResourceConsumptionListCheckPresence(ctx, c)
	if err != nil {
		diags.AddError("Cannot get info about resource group resource consumption list", err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: before flatten")

	// Rebuild state, keeping timeouts and minting a fresh random UUID id.
	id := uuid.New()
	*state = models.DataSourceRGResourceConsumptionListModel{
		Timeouts: state.Timeouts,
		Id:       types.StringValue(id.String()),
	}

	items := make([]models.ItemResourceConsumptionModel, 0, len(resConsList.Data))
	for _, resConsItem := range resConsList.Data {
		item := models.ItemResourceConsumptionModel{
			RGID: types.Int64Value(int64(resConsItem.RGID)),
			// Resources actually in use by the RG.
			Consumed: &models.ResourceModel{
				CPU:         types.Int64Value(resConsItem.Consumed.CPU),
				DiskSize:    types.Float64Value(resConsItem.Consumed.DiskSize),
				DiskSizeMax: types.Float64Value(resConsItem.Consumed.DiskSizeMax),
				ExtIPs:      types.Int64Value(resConsItem.Consumed.ExtIPs),
				ExtTraffic:  types.Int64Value(resConsItem.Consumed.ExtTraffic),
				GPU:         types.Int64Value(resConsItem.Consumed.GPU),
				RAM:         types.Int64Value(resConsItem.Consumed.RAM),
			},
			// Resources reserved for the RG but not yet consumed.
			Reserved: &models.ResourceModel{
				CPU:         types.Int64Value(resConsItem.Reserved.CPU),
				DiskSize:    types.Float64Value(resConsItem.Reserved.DiskSize),
				DiskSizeMax: types.Float64Value(resConsItem.Reserved.DiskSizeMax),
				ExtIPs:      types.Int64Value(resConsItem.Reserved.ExtIPs),
				ExtTraffic:  types.Int64Value(resConsItem.Reserved.ExtTraffic),
				GPU:         types.Int64Value(resConsItem.Reserved.GPU),
				RAM:         types.Int64Value(resConsItem.Reserved.RAM),
			},
			// Quota limits configured for the RG (CU* units per the platform API).
			ResourceLimits: &models.ResourceLimitsModel{
				CUC:      types.Float64Value(resConsItem.ResourceLimits.CUC),
				CUD:      types.Float64Value(resConsItem.ResourceLimits.CUD),
				CUDM:     types.Float64Value(resConsItem.ResourceLimits.CUDM),
				CUI:      types.Float64Value(resConsItem.ResourceLimits.CUI),
				CUM:      types.Float64Value(resConsItem.ResourceLimits.CUM),
				CUNP:     types.Float64Value(resConsItem.ResourceLimits.CUNP),
				GPUUnits: types.Float64Value(resConsItem.ResourceLimits.GPUUnits),
			},
		}

		// SEPs come back as a nested map (sep id -> data name -> disk figures);
		// flatten the two-level map into a flat list of SEP entries.
		sepsConsumed := make([]models.SEPsModel, 0, len(resConsItem.Consumed.SEPs))
		for sepId, data := range resConsItem.Consumed.SEPs {
			for dataName, diskData := range data {
				sepItem := models.SEPsModel{
					SepID:       types.StringValue(sepId),
					DataName:    types.StringValue(dataName),
					DiskSize:    types.Float64Value(diskData.DiskSize),
					DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
				}
				sepsConsumed = append(sepsConsumed, sepItem)
			}
		}
		item.Consumed.SEPs = sepsConsumed

		// Same flattening for the reserved-side SEP map.
		sepsReserved := make([]models.SEPsModel, 0, len(resConsItem.Reserved.SEPs))
		for sepId, data := range resConsItem.Reserved.SEPs {
			for dataName, diskData := range data {
				sepItem := models.SEPsModel{
					SepID:       types.StringValue(sepId),
					DataName:    types.StringValue(dataName),
					DiskSize:    types.Float64Value(diskData.DiskSize),
					DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
				}
				sepsReserved = append(sepsReserved, sepItem)
			}
		}
		item.Reserved.SEPs = sepsReserved

		items = append(items, item)
	}

	state.Items = items
	state.EntryCount = types.Int64Value(int64(resConsList.EntryCount))

	tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: after flatten")

	tflog.Info(ctx, "End flattens.RGResourceConsumptionListDataSource")
	// No diagnostics were accumulated on the success path.
	return nil
}
|
||||
@@ -0,0 +1,65 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGUsageDataSource flattens data source for rg usage.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
// RGUsageDataSource flattens the "rg usage" response (aggregate resource usage
// of one resource group) into Terraform state.
func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.RGUsageDataSource")

	diags := diag.Diagnostics{}

	// Fetch the usage record; a lookup failure is fatal for the data source.
	usageInfo, err := utilities.RGUsageCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about resource group usage", err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.RGUsageDataSource: before flatten")

	// Rebuild state: keep the request fields, mint a fresh UUID id, and copy
	// the scalar usage figures from the platform record.
	id := uuid.New()
	*state = models.DataSourceRGUsageModel{
		RGID:     state.RGID,
		Reason:   state.Reason,
		Timeouts: state.Timeouts,
		Id:       types.StringValue(id.String()),

		CPU:         types.Int64Value(int64(usageInfo.CPU)),
		DiskSize:    types.Int64Value(int64(usageInfo.DiskSize)),
		DiskSizeMax: types.Int64Value(int64(usageInfo.DiskSizeMax)),
		ExtIPs:      types.Int64Value(int64(usageInfo.ExtIPs)),
		// NOTE(review): state field ExtTraffic is fed from SDK field ExtraTraffic —
		// the name mismatch looks intentional (SDK naming) but verify against the SDK model.
		ExtTraffic: types.Int64Value(int64(usageInfo.ExtraTraffic)),
		GPU:        types.Int64Value(int64(usageInfo.GPU)),
		RAM:        types.Int64Value(int64(usageInfo.RAM)),
	}

	// SEPs come back as a nested map (sep id -> data name -> disk figures);
	// flatten the two-level map into a flat list of SEP entries.
	seps := make([]models.SEPsModel, 0, len(usageInfo.SEPs))
	for sepId, data := range usageInfo.SEPs {
		for dataName, diskData := range data {
			sepItem := models.SEPsModel{
				SepID:       types.StringValue(sepId),
				DataName:    types.StringValue(dataName),
				DiskSize:    types.Float64Value(diskData.DiskSize),
				DiskSizeMax: types.Float64Value(diskData.DiskSizeMax),
			}
			seps = append(seps, sepItem)
		}
	}
	state.SEPs = seps

	tflog.Info(ctx, "flattens.RGUsageDataSource: after flatten")

	tflog.Info(ctx, "End flattens.RGUsageDataSource")
	// No diagnostics were accumulated on the success path.
	return nil
}
|
||||
153
internal/service/cloudapi/rg/flattens/flatten_resource_rg.go
Normal file
153
internal/service/cloudapi/rg/flattens/flatten_resource_rg.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// RGResource flattens resource for rg (resource group).
|
||||
// Return error in case resource is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func RGResource(ctx context.Context, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.RGResource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot parse resource group ID from state", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
recordRG, err := utilities.RGCheckPresence(ctx, rgId, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgId), err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.RGResource: before flatten", map[string]any{"rg_id": rgId, "recordRG": recordRG})
|
||||
|
||||
*plan = models.ResourceRGModel{
|
||||
AccountID: types.Int64Value(int64(recordRG.AccountID)),
|
||||
GID: types.Int64Value(int64(recordRG.GID)),
|
||||
Name: types.StringValue(recordRG.Name),
|
||||
|
||||
DefNetType: plan.DefNetType,
|
||||
IPCIDR: plan.IPCIDR,
|
||||
Quota: flattenQuota(ctx, &recordRG.ResourceLimits),
|
||||
ExtNetID: plan.ExtNetID,
|
||||
ExtIP: plan.ExtIP,
|
||||
Owner: plan.Owner,
|
||||
Access: plan.Access,
|
||||
DefNet: plan.DefNet,
|
||||
Description: plan.Description,
|
||||
Force: plan.Force,
|
||||
Permanently: plan.Permanently,
|
||||
Reason: plan.Reason,
|
||||
RegisterComputes: plan.RegisterComputes,
|
||||
Restore: plan.Restore,
|
||||
Enable: plan.Enable,
|
||||
Timeouts: plan.Timeouts,
|
||||
|
||||
RGID: types.Int64Value(int64(recordRG.ID)),
|
||||
LastUpdated: plan.LastUpdated,
|
||||
AccountName: types.StringValue(recordRG.AccountName),
|
||||
ACL: flattenACL(ctx, &recordRG.ACL),
|
||||
CPUAllocationParameter: types.StringValue(recordRG.CPUAllocationParameter),
|
||||
CPUAllocationRatio: types.Float64Value(recordRG.CPUAllocationRatio),
|
||||
DefNetID: types.Int64Value(recordRG.DefNetID),
|
||||
DeletedBy: types.StringValue(recordRG.DeletedBy),
|
||||
DeletedTime: types.Int64Value(int64(recordRG.DeletedTime)),
|
||||
Dirty: types.BoolValue(recordRG.Dirty),
|
||||
GUID: types.Int64Value(int64(recordRG.GUID)),
|
||||
Id: types.StringValue(strconv.Itoa(int(recordRG.ID))),
|
||||
LockStatus: types.StringValue(recordRG.LockStatus),
|
||||
Milestones: types.Int64Value(int64(recordRG.Milestones)),
|
||||
Secret: types.StringValue(recordRG.Secret),
|
||||
Status: types.StringValue(recordRG.Status),
|
||||
UpdatedBy: types.StringValue(recordRG.UpdatedBy),
|
||||
UpdatedTime: types.Int64Value(int64(recordRG.UpdatedTime)),
|
||||
}
|
||||
|
||||
var diagsItem diag.Diagnostics
|
||||
plan.UniqPools, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.UniqPools)
|
||||
if diagsItem != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGResource: cannot flatten recordRG.UniqPools to plan.UniqPools", diags))
|
||||
}
|
||||
plan.VINS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, recordRG.VINS)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGResource: cannot flatten recordRG.VINS to plan.VINS", diags))
|
||||
}
|
||||
plan.ResTypes, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.ResTypes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGResource: cannot flatten recordRG.ResTypes to plan.ResTypes", diags))
|
||||
}
|
||||
plan.VMS, diagsItem = types.ListValueFrom(ctx, types.Int64Type, recordRG.Computes)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGResource: cannot flatten recordRG.Computes to plan.Computes", diags))
|
||||
}
|
||||
plan.ComputeFeatures, diagsItem = types.ListValueFrom(ctx, types.StringType, recordRG.ComputeFeatures)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Error(ctx, fmt.Sprint("flattens.RGResource: cannot flatten recordRG.ComputeFeatures to recordRG.ComputeFeatures", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattenResourceRG: after flatten", map[string]any{"rg_id": plan.Id.ValueString()})
|
||||
|
||||
tflog.Info(ctx, "End FlattenRGResource")
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenACL(ctx context.Context, item *rg.ListACL) types.List {
|
||||
tflog.Info(ctx, "Start flattenACLItems")
|
||||
tempSlice := make([]types.Object, 0, len(*item))
|
||||
for _, aclItem := range *item {
|
||||
temp := models.ItemACLModel{
|
||||
Explicit: types.BoolValue(aclItem.Explicit),
|
||||
GUID: types.StringValue(aclItem.GUID),
|
||||
Right: types.StringValue(aclItem.Right),
|
||||
Status: types.StringValue(aclItem.Status),
|
||||
Type: types.StringValue(aclItem.Type),
|
||||
UserGroupID: types.StringValue(aclItem.UserGroupID),
|
||||
}
|
||||
obj, diags := types.ObjectValueFrom(ctx, models.ItemACL, temp)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenACLItems struct to obj", diags))
|
||||
}
|
||||
tempSlice = append(tempSlice, obj)
|
||||
}
|
||||
|
||||
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACL}, tempSlice)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenACLItems", diags))
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End flattenACLItems")
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenQuota(ctx context.Context, item *rg.ResourceLimits) types.Object {
|
||||
tflog.Info(ctx, "Start flattenQuota")
|
||||
tempStruct := models.QuotaModel{
|
||||
CPU: types.Int64Value(int64(item.CUC)),
|
||||
Ram: types.Int64Value(int64(item.CUM)),
|
||||
Disk: types.Int64Value(int64(item.CUDM)),
|
||||
ExtTraffic: types.Int64Value(int64(item.CUNP)),
|
||||
ExtIps: types.Int64Value(int64(item.CUI)),
|
||||
GpuUnits: types.Int64Value(int64(item.GPUUnits)),
|
||||
CuD: types.Int64Value(int64(item.CUD)),
|
||||
}
|
||||
quota, diags := types.ObjectValueFrom(ctx, models.ItemQuota, tempStruct)
|
||||
if diags != nil {
|
||||
tflog.Error(ctx, fmt.Sprint("Error flattenQuota", diags))
|
||||
}
|
||||
return quota
|
||||
}
|
||||
41
internal/service/cloudapi/rg/input_checks.go
Normal file
41
internal/service/cloudapi/rg/input_checks.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/ic"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func resourceRgInputChecks(ctx context.Context, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
accountId := uint64(plan.AccountID.ValueInt64())
|
||||
tflog.Info(ctx, "resourceRgInputChecks: exist account check", map[string]any{"account_id": accountId})
|
||||
err := ic.ExistAccount(ctx, accountId, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error())
|
||||
}
|
||||
|
||||
gid := uint64(plan.GID.ValueInt64())
|
||||
tflog.Info(ctx, "resourceRgInputChecks: exist gid check", map[string]any{"gid": gid})
|
||||
err = ic.ExistGID(ctx, gid, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about gid with ID %v", gid), err.Error())
|
||||
}
|
||||
|
||||
if !plan.ExtNetID.IsNull() {
|
||||
extnetId := uint64(plan.ExtNetID.ValueInt64())
|
||||
tflog.Info(ctx, "resourceRgInputChecks: exist ext_net check", map[string]any{"ext_net_id": extnetId})
|
||||
err = ic.ExistExtNetInRG(ctx, extnetId, accountId, c)
|
||||
if err != nil {
|
||||
diags.AddError(fmt.Sprintf("Cannot get info about ext net with ID %v", extnetId), err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
55
internal/service/cloudapi/rg/models/model_data_source_rg.go
Normal file
55
internal/service/cloudapi/rg/models/model_data_source_rg.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// DataSourceRGModel is the Terraform state model for the single resource
// group data source: the request fields identify the RG, the response fields
// mirror the platform's RG record.
type DataSourceRGModel struct {
	// request fields
	RGID     types.Int64    `tfsdk:"rg_id"`  // required: ID of the resource group to read
	Reason   types.String   `tfsdk:"reason"` // optional audit reason passed to the platform
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	AccountID              types.Int64          `tfsdk:"account_id"`
	AccountName            types.String         `tfsdk:"account_name"`
	ACL                    types.List           `tfsdk:"acl"`
	ComputeFeatures        types.List           `tfsdk:"compute_features"`
	CPUAllocationParameter types.String         `tfsdk:"cpu_allocation_parameter"`
	CPUAllocationRatio     types.Float64        `tfsdk:"cpu_allocation_ratio"`
	CreatedBy              types.String         `tfsdk:"created_by"`
	CreatedTime            types.Int64          `tfsdk:"created_time"`
	DefNetID               types.Int64          `tfsdk:"def_net_id"`
	DefNetType             types.String         `tfsdk:"def_net_type"`
	DeletedBy              types.String         `tfsdk:"deleted_by"`
	DeletedTime            types.Int64          `tfsdk:"deleted_time"`
	Description            types.String         `tfsdk:"desc"`
	Dirty                  types.Bool           `tfsdk:"dirty"`
	GID                    types.Int64          `tfsdk:"gid"`
	GUID                   types.Int64          `tfsdk:"guid"`
	Id                     types.String         `tfsdk:"id"` // Terraform data-source id
	LockStatus             types.String         `tfsdk:"lock_status"`
	Milestones             types.Int64          `tfsdk:"milestones"`
	Name                   types.String         `tfsdk:"name"`
	RegisterComputes       types.Bool           `tfsdk:"register_computes"`
	ResourceLimits         *ResourceLimitsModel `tfsdk:"resource_limits"`
	ResTypes               types.List           `tfsdk:"res_types"`
	Secret                 types.String         `tfsdk:"secret"`
	Status                 types.String         `tfsdk:"status"`
	UniqPools              types.List           `tfsdk:"uniq_pools"`
	UpdatedBy              types.String         `tfsdk:"updated_by"`
	UpdatedTime            types.Int64          `tfsdk:"updated_time"`
	VINS                   types.List           `tfsdk:"vins"`
	Computes               types.List           `tfsdk:"computes"`
}
|
||||
|
||||
// ResourceLimitsModel holds the RG quota limits in the platform's CU
// (compute unit) terminology.
type ResourceLimitsModel struct {
	CUC      types.Float64 `tfsdk:"cu_c"`
	CUD      types.Float64 `tfsdk:"cu_d"`
	CUDM     types.Float64 `tfsdk:"cu_dm"`
	CUI      types.Float64 `tfsdk:"cu_i"`
	CUM      types.Float64 `tfsdk:"cu_m"`
	CUNP     types.Float64 `tfsdk:"cu_np"`
	GPUUnits types.Float64 `tfsdk:"gpu_units"`
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// DataSourceRGAffinityGroupComputesModel is the Terraform state model for the
// data source listing the computes of one affinity group in a resource group.
type DataSourceRGAffinityGroupComputesModel struct {
	// request required fields
	RGID          types.Int64  `tfsdk:"rg_id"`          // resource group to query
	AffinityGroup types.String `tfsdk:"affinity_group"` // affinity group label

	// request optional fields
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id    types.String                    `tfsdk:"id"` // Terraform data-source id
	Items []ItemAffinityGroupComputeModel `tfsdk:"items"`
}
|
||||
|
||||
type ItemAffinityGroupComputeModel struct {
|
||||
ComputeID types.Int64 `tfsdk:"compute_id"`
|
||||
OtherNode types.List `tfsdk:"other_node"`
|
||||
OtherNodeIndirect types.List `tfsdk:"other_node_indirect"`
|
||||
OtherNodeIndirectSoft types.List `tfsdk:"other_node_indirect_soft"`
|
||||
OtherNodeSoft types.List `tfsdk:"other_node_soft"`
|
||||
SameNode types.List `tfsdk:"same_node"`
|
||||
SameNodeSoft types.List `tfsdk:"same_node_soft"`
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGAffinityGroupsGetModel struct {
|
||||
// request required fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
AffinityGroup types.String `tfsdk:"affinity_group"`
|
||||
|
||||
// request optional fields
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Ids types.List `tfsdk:"ids"`
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGAffinityGroupsListModel struct {
|
||||
// request required fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
|
||||
// request optional fields
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
AffinityGroups []ItemAffinityGroupModel `tfsdk:"affinity_groups"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemAffinityGroupModel struct {
|
||||
Label types.String `tfsdk:"label"`
|
||||
Ids []ItemIDModel `tfsdk:"ids"`
|
||||
}
|
||||
|
||||
type ItemIDModel struct {
|
||||
Id types.Int64 `tfsdk:"id"`
|
||||
NodeId types.Int64 `tfsdk:"node_id"`
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGAuditsModel struct {
|
||||
// request field
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGAuditModel `tfsdk:"items"`
|
||||
}
|
||||
|
||||
type ItemsRGAuditModel struct {
|
||||
Call types.String `tfsdk:"call"`
|
||||
ResponseTime types.Float64 `tfsdk:"responsetime"`
|
||||
StatusCode types.Int64 `tfsdk:"statuscode"`
|
||||
Timestamp types.Float64 `tfsdk:"timestamp"`
|
||||
User types.String `tfsdk:"user"`
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGGetResourceConsumptionModel struct {
|
||||
// request fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Consumed *ResourceModel `tfsdk:"consumed"`
|
||||
Reserved *ResourceModel `tfsdk:"reserved"`
|
||||
ResourceLimits *ResourceLimitsModel `tfsdk:"resource_limits"`
|
||||
}
|
||||
|
||||
type ResourceModel struct {
|
||||
CPU types.Int64 `tfsdk:"cpu"`
|
||||
DiskSize types.Float64 `tfsdk:"disk_size"`
|
||||
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
|
||||
ExtIPs types.Int64 `tfsdk:"extips"`
|
||||
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
|
||||
GPU types.Int64 `tfsdk:"gpu"`
|
||||
RAM types.Int64 `tfsdk:"ram"`
|
||||
SEPs []SEPsModel `tfsdk:"seps"`
|
||||
}
|
||||
|
||||
type SEPsModel struct {
|
||||
SepID types.String `tfsdk:"sep_id"`
|
||||
DataName types.String `tfsdk:"data_name"`
|
||||
DiskSize types.Float64 `tfsdk:"disk_size"`
|
||||
DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListModel struct {
|
||||
// request optional fields
|
||||
ById types.Int64 `tfsdk:"by_id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
AccountId types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
CreatedAfter types.Int64 `tfsdk:"created_after"`
|
||||
CreatedBefore types.Int64 `tfsdk:"created_before"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
LockStatus types.String `tfsdk:"lock_status"`
|
||||
IncludeDeleted types.Bool `tfsdk:"includedeleted"`
|
||||
SortBy types.String `tfsdk:"sort_by"`
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListModel struct {
|
||||
AccountACL types.List `tfsdk:"account_acl"`
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
ComputeFeatures types.List `tfsdk:"compute_features"`
|
||||
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
|
||||
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
|
||||
CreatedBy types.String `tfsdk:"created_by"`
|
||||
CreatedTime types.Int64 `tfsdk:"created_time"`
|
||||
DefNetID types.Int64 `tfsdk:"def_net_id"`
|
||||
DefNetType types.String `tfsdk:"def_net_type"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
Description types.String `tfsdk:"desc"`
|
||||
Dirty types.Bool `tfsdk:"dirty"`
|
||||
GID types.Int64 `tfsdk:"gid"`
|
||||
GUID types.Int64 `tfsdk:"guid"`
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
LockStatus types.String `tfsdk:"lock_status"`
|
||||
Milestones types.Int64 `tfsdk:"milestones"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
RegisterComputes types.Bool `tfsdk:"register_computes"`
|
||||
ResourceLimits *ResourceLimitsModel `tfsdk:"resource_limits"`
|
||||
ResTypes types.List `tfsdk:"resource_types"`
|
||||
Secret types.String `tfsdk:"secret"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
UniqPools types.List `tfsdk:"uniq_pools"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
VINS types.List `tfsdk:"vins"`
|
||||
VMS types.List `tfsdk:"vms"`
|
||||
}
|
||||
|
||||
type AccountACLModel struct {
|
||||
Explicit types.Bool `tfsdk:"explicit"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Right types.String `tfsdk:"right"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
Type types.String `tfsdk:"type"`
|
||||
UserGroupID types.String `tfsdk:"user_group_id"`
|
||||
}
|
||||
@@ -0,0 +1,75 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/attr"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListComputesModel struct {
|
||||
// request required fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
|
||||
// request optional fields
|
||||
ComputeID types.Int64 `tfsdk:"compute_id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
TechStatus types.String `tfsdk:"tech_status"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
IPAddress types.String `tfsdk:"ip_address"`
|
||||
ExtNetName types.String `tfsdk:"extnet_name"`
|
||||
ExtNetID types.Int64 `tfsdk:"extnet_id"`
|
||||
SortBy types.String `tfsdk:"sort_by"`
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListComputeModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListComputeModel struct {
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
AffinityLabel types.String `tfsdk:"affinity_label"`
|
||||
AffinityRules types.List `tfsdk:"affinity_rules"`
|
||||
AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
|
||||
AntiAffinityRules types.List `tfsdk:"antiaffinity_rules"`
|
||||
CPUs types.Int64 `tfsdk:"cpus"`
|
||||
CreatedBy types.String `tfsdk:"created_by"`
|
||||
CreatedTime types.Int64 `tfsdk:"created_time"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
ID types.Int64 `tfsdk:"id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
RAM types.Int64 `tfsdk:"ram"`
|
||||
Registered types.Bool `tfsdk:"registered"`
|
||||
RGName types.String `tfsdk:"rg_name"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
TechStatus types.String `tfsdk:"tech_status"`
|
||||
TotalDisksSize types.Int64 `tfsdk:"total_disks_size"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
UserManaged types.Bool `tfsdk:"user_managed"`
|
||||
VINSConnected types.Int64 `tfsdk:"vins_connected"`
|
||||
}
|
||||
|
||||
type AffinityRuleModel struct {
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Key types.String `tfsdk:"key"`
|
||||
Mode types.String `tfsdk:"mode"`
|
||||
Policy types.String `tfsdk:"policy"`
|
||||
Topology types.String `tfsdk:"topology"`
|
||||
Value types.String `tfsdk:"value"`
|
||||
}
|
||||
|
||||
var ItemAffinityRule = map[string]attr.Type{
|
||||
"guid": types.StringType,
|
||||
"key": types.StringType,
|
||||
"mode": types.StringType,
|
||||
"policy": types.StringType,
|
||||
"topology": types.StringType,
|
||||
"value": types.StringType,
|
||||
}
|
||||
@@ -0,0 +1,59 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListDeletedModel struct {
|
||||
// request optional fields
|
||||
ById types.Int64 `tfsdk:"by_id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
AccountId types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
CreatedAfter types.Int64 `tfsdk:"created_after"`
|
||||
CreatedBefore types.Int64 `tfsdk:"created_before"`
|
||||
LockStatus types.String `tfsdk:"lock_status"`
|
||||
SortBy types.String `tfsdk:"sort_by"`
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListDeletedModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListDeletedModel struct {
|
||||
AccountACL types.List `tfsdk:"account_acl"`
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
ComputeFeatures types.List `tfsdk:"compute_features"`
|
||||
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
|
||||
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
|
||||
CreatedBy types.String `tfsdk:"created_by"`
|
||||
CreatedTime types.Int64 `tfsdk:"created_time"`
|
||||
DefNetID types.Int64 `tfsdk:"def_net_id"`
|
||||
DefNetType types.String `tfsdk:"def_net_type"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
Description types.String `tfsdk:"desc"`
|
||||
Dirty types.Bool `tfsdk:"dirty"`
|
||||
GID types.Int64 `tfsdk:"gid"`
|
||||
GUID types.Int64 `tfsdk:"guid"`
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
LockStatus types.String `tfsdk:"lock_status"`
|
||||
Milestones types.Int64 `tfsdk:"milestones"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
RegisterComputes types.Bool `tfsdk:"register_computes"`
|
||||
ResourceLimits *ResourceLimitsModel `tfsdk:"resource_limits"`
|
||||
ResTypes types.List `tfsdk:"resource_types"`
|
||||
Secret types.String `tfsdk:"secret"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
UniqPools types.List `tfsdk:"uniq_pools"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
VINS types.List `tfsdk:"vins"`
|
||||
VMS types.List `tfsdk:"vms"`
|
||||
}
|
||||
@@ -0,0 +1,109 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListLBModel struct {
|
||||
// request required fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
|
||||
// request optional fields
|
||||
ByID types.Int64 `tfsdk:"by_id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
TechStatus types.String `tfsdk:"tech_status"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
FrontIP types.String `tfsdk:"front_ip"`
|
||||
BackIP types.String `tfsdk:"back_ip"`
|
||||
SortBy types.String `tfsdk:"sort_by"`
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListLBModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListLBModel struct {
|
||||
HAMode types.Bool `tfsdk:"ha_mode"`
|
||||
BackendHAIP types.String `tfsdk:"backend_haip"`
|
||||
Backends []ItemBackendModel `tfsdk:"backends"`
|
||||
CreatedBy types.String `tfsdk:"created_by"`
|
||||
CreatedTime types.Int64 `tfsdk:"created_time"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
Description types.String `tfsdk:"desc"`
|
||||
DPAPIUser types.String `tfsdk:"dp_api_user"`
|
||||
ExtNetID types.Int64 `tfsdk:"extnet_id"`
|
||||
FrontendHAIP types.String `tfsdk:"frontend_haip"`
|
||||
Frontends []ItemFrontendModel `tfsdk:"frontends"`
|
||||
GID types.Int64 `tfsdk:"gid"`
|
||||
GUID types.Int64 `tfsdk:"guid"`
|
||||
ID types.Int64 `tfsdk:"id"`
|
||||
ImageID types.Int64 `tfsdk:"image_id"`
|
||||
Milestones types.Int64 `tfsdk:"milestones"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
PrimaryNode RecordNodeModel `tfsdk:"primary_node"`
|
||||
RGName types.String `tfsdk:"rg_name"`
|
||||
SecondaryNode RecordNodeModel `tfsdk:"secondary_node"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
TechStatus types.String `tfsdk:"tech_status"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
VINSID types.Int64 `tfsdk:"vins_id"`
|
||||
}
|
||||
|
||||
type ItemFrontendModel struct {
|
||||
Backend types.String `tfsdk:"backend"`
|
||||
Bindings []ItemBindingModel `tfsdk:"bindings"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
}
|
||||
|
||||
type ItemBindingModel struct {
|
||||
Address types.String `tfsdk:"address"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
Port types.Int64 `tfsdk:"port"`
|
||||
}
|
||||
|
||||
type RecordNodeModel struct {
|
||||
BackendIP types.String `tfsdk:"backend_ip"`
|
||||
ComputeID types.Int64 `tfsdk:"compute_id"`
|
||||
FrontendIP types.String `tfsdk:"frontend_ip"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
MGMTIP types.String `tfsdk:"mgmt_ip"`
|
||||
NetworkID types.Int64 `tfsdk:"network_id"`
|
||||
}
|
||||
|
||||
type ItemBackendModel struct {
|
||||
Algorithm types.String `tfsdk:"algorithm"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
ServerDefaultSettings RecordServerSettingsModel `tfsdk:"server_default_settings"`
|
||||
Servers []ItemServerModel `tfsdk:"servers"`
|
||||
}
|
||||
|
||||
type RecordServerSettingsModel struct {
|
||||
Inter types.Int64 `tfsdk:"inter"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
DownInter types.Int64 `tfsdk:"down_inter"`
|
||||
Rise types.Int64 `tfsdk:"rise"`
|
||||
Fall types.Int64 `tfsdk:"fall"`
|
||||
SlowStart types.Int64 `tfsdk:"slow_start"`
|
||||
MaxConn types.Int64 `tfsdk:"max_conn"`
|
||||
MaxQueue types.Int64 `tfsdk:"max_queue"`
|
||||
Weight types.Int64 `tfsdk:"weight"`
|
||||
}
|
||||
|
||||
type ItemServerModel struct {
|
||||
Address types.String `tfsdk:"address"`
|
||||
Check types.String `tfsdk:"check"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
Port types.Int64 `tfsdk:"port"`
|
||||
ServerSettings RecordServerSettingsModel `tfsdk:"server_settings"`
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListPFWModel struct {
|
||||
// request fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"` // required
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"` // optional
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListPFWModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListPFWModel struct {
|
||||
PublicPortEnd types.Int64 `tfsdk:"public_port_end"`
|
||||
PublicPortStart types.Int64 `tfsdk:"public_port_start"`
|
||||
VMID types.Int64 `tfsdk:"vm_id"`
|
||||
VMIP types.String `tfsdk:"vm_ip"`
|
||||
VMName types.String `tfsdk:"vm_name"`
|
||||
VMPort types.Int64 `tfsdk:"vm_port"`
|
||||
VINSID types.Int64 `tfsdk:"vins_id"`
|
||||
VINSName types.String `tfsdk:"vins_name"`
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGListVinsModel struct {
|
||||
// request required fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
|
||||
// request optional fields
|
||||
Name types.String `tfsdk:"name"`
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
ExtIP types.String `tfsdk:"ext_ip"`
|
||||
VINSID types.Int64 `tfsdk:"vins_id"`
|
||||
SortBy types.String `tfsdk:"sort_by"`
|
||||
Page types.Int64 `tfsdk:"page"`
|
||||
Size types.Int64 `tfsdk:"size"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemsRGListVinsModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemsRGListVinsModel struct {
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
Computes types.Int64 `tfsdk:"computes"`
|
||||
CreatedBy types.String `tfsdk:"created_by"`
|
||||
CreatedTime types.Int64 `tfsdk:"created_time"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
ExternalIP types.String `tfsdk:"external_ip"`
|
||||
ExtnetID types.Int64 `tfsdk:"extnet_id"`
|
||||
FreeIPs types.Int64 `tfsdk:"free_ips"`
|
||||
ID types.Int64 `tfsdk:"id"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
Network types.String `tfsdk:"network"`
|
||||
PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"`
|
||||
RGName types.String `tfsdk:"rg_name"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGResourceConsumptionListModel struct {
|
||||
// request fields
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
Items []ItemResourceConsumptionModel `tfsdk:"items"`
|
||||
EntryCount types.Int64 `tfsdk:"entry_count"`
|
||||
}
|
||||
|
||||
type ItemResourceConsumptionModel struct {
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
Consumed *ResourceModel `tfsdk:"consumed"`
|
||||
Reserved *ResourceModel `tfsdk:"reserved"`
|
||||
ResourceLimits *ResourceLimitsModel `tfsdk:"resource_limits"`
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type DataSourceRGUsageModel struct {
|
||||
// request fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
Reason types.String `tfsdk:"reason"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
Id types.String `tfsdk:"id"`
|
||||
CPU types.Int64 `tfsdk:"cpu"`
|
||||
DiskSize types.Int64 `tfsdk:"disk_size"`
|
||||
DiskSizeMax types.Int64 `tfsdk:"disk_size_max"`
|
||||
ExtIPs types.Int64 `tfsdk:"extips"`
|
||||
ExtTraffic types.Int64 `tfsdk:"exttraffic"`
|
||||
GPU types.Int64 `tfsdk:"gpu"`
|
||||
RAM types.Int64 `tfsdk:"ram"`
|
||||
SEPs []SEPsModel `tfsdk:"seps"`
|
||||
}
|
||||
129
internal/service/cloudapi/rg/models/model_resource_rg.go
Normal file
129
internal/service/cloudapi/rg/models/model_resource_rg.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/attr"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
type ResourceRGModel struct {
|
||||
// request fields - required
|
||||
AccountID types.Int64 `tfsdk:"account_id"`
|
||||
GID types.Int64 `tfsdk:"gid"`
|
||||
Name types.String `tfsdk:"name"`
|
||||
|
||||
// request fields - optional
|
||||
DefNetType types.String `tfsdk:"def_net_type"`
|
||||
IPCIDR types.String `tfsdk:"ipcidr"`
|
||||
ExtNetID types.Int64 `tfsdk:"ext_net_id"`
|
||||
ExtIP types.String `tfsdk:"ext_ip"`
|
||||
Owner types.String `tfsdk:"owner"`
|
||||
Quota types.Object `tfsdk:"quota"`
|
||||
Access types.List `tfsdk:"access"`
|
||||
DefNet types.Object `tfsdk:"def_net"`
|
||||
Description types.String `tfsdk:"description"`
|
||||
Force types.Bool `tfsdk:"force"`
|
||||
Permanently types.Bool `tfsdk:"permanently"`
|
||||
Reason types.String `tfsdk:"reason"`
|
||||
RegisterComputes types.Bool `tfsdk:"register_computes"`
|
||||
Restore types.Bool `tfsdk:"restore"`
|
||||
Enable types.Bool `tfsdk:"enable"`
|
||||
Timeouts timeouts.Value `tfsdk:"timeouts"`
|
||||
|
||||
// response fields
|
||||
RGID types.Int64 `tfsdk:"rg_id"`
|
||||
LastUpdated types.String `tfsdk:"last_updated"`
|
||||
AccountName types.String `tfsdk:"account_name"`
|
||||
ACL types.List `tfsdk:"acl"`
|
||||
ComputeFeatures types.List `tfsdk:"compute_features"`
|
||||
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
|
||||
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
|
||||
DefNetID types.Int64 `tfsdk:"def_net_id"`
|
||||
DeletedBy types.String `tfsdk:"deleted_by"`
|
||||
DeletedTime types.Int64 `tfsdk:"deleted_time"`
|
||||
Dirty types.Bool `tfsdk:"dirty"`
|
||||
GUID types.Int64 `tfsdk:"guid"`
|
||||
Id types.String `tfsdk:"id"`
|
||||
LockStatus types.String `tfsdk:"lock_status"`
|
||||
Milestones types.Int64 `tfsdk:"milestones"`
|
||||
ResTypes types.List `tfsdk:"res_types"`
|
||||
Secret types.String `tfsdk:"secret"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
UniqPools types.List `tfsdk:"uniq_pools"`
|
||||
UpdatedBy types.String `tfsdk:"updated_by"`
|
||||
UpdatedTime types.Int64 `tfsdk:"updated_time"`
|
||||
VINS types.List `tfsdk:"vins"`
|
||||
VMS types.List `tfsdk:"vms"`
|
||||
}
|
||||
|
||||
type ItemACLModel struct {
|
||||
Explicit types.Bool `tfsdk:"explicit"`
|
||||
GUID types.String `tfsdk:"guid"`
|
||||
Right types.String `tfsdk:"right"`
|
||||
Status types.String `tfsdk:"status"`
|
||||
Type types.String `tfsdk:"type"`
|
||||
UserGroupID types.String `tfsdk:"user_group_id"`
|
||||
}
|
||||
|
||||
type QuotaModel struct {
|
||||
CPU types.Int64 `tfsdk:"cpu"`
|
||||
Ram types.Int64 `tfsdk:"ram"`
|
||||
Disk types.Int64 `tfsdk:"disk"`
|
||||
ExtTraffic types.Int64 `tfsdk:"ext_traffic"`
|
||||
ExtIps types.Int64 `tfsdk:"ext_ips"`
|
||||
GpuUnits types.Int64 `tfsdk:"gpu_units"`
|
||||
CuD types.Int64 `tfsdk:"cu_d"`
|
||||
}
|
||||
|
||||
type AccessModel struct {
|
||||
User types.String `tfsdk:"user"`
|
||||
Right types.String `tfsdk:"right"`
|
||||
Reason types.String `tfsdk:"reason"`
|
||||
}
|
||||
|
||||
type DefNetModel struct {
|
||||
NetType types.String `tfsdk:"net_type"`
|
||||
NetId types.Int64 `tfsdk:"net_id"`
|
||||
Reason types.String `tfsdk:"reason"`
|
||||
}
|
||||
|
||||
var ItemAccess = map[string]attr.Type{
|
||||
"user": types.StringType,
|
||||
"right": types.StringType,
|
||||
"reason": types.StringType,
|
||||
}
|
||||
|
||||
var ItemDefNet = map[string]attr.Type{
|
||||
"net_type": types.StringType,
|
||||
"net_id": types.Int64Type,
|
||||
"reason": types.StringType,
|
||||
}
|
||||
|
||||
var ItemACL = map[string]attr.Type{
|
||||
"explicit": types.BoolType,
|
||||
"guid": types.StringType,
|
||||
"right": types.StringType,
|
||||
"status": types.StringType,
|
||||
"type": types.StringType,
|
||||
"user_group_id": types.StringType,
|
||||
}
|
||||
|
||||
var ItemQuota = map[string]attr.Type{
|
||||
"cpu": types.Int64Type,
|
||||
"ram": types.Int64Type,
|
||||
"disk": types.Int64Type,
|
||||
"ext_traffic": types.Int64Type,
|
||||
"ext_ips": types.Int64Type,
|
||||
"gpu_units": types.Int64Type,
|
||||
"cu_d": types.Int64Type,
|
||||
}
|
||||
|
||||
// Contains returns true if accessList contains a as an element. Otherwise it returns false.
|
||||
func (a *AccessModel) Contains(accessList []AccessModel) bool {
|
||||
for _, accessElem := range accessList {
|
||||
if a.User.Equal(accessElem.User) && a.Right.Equal(accessElem.Right) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
410
internal/service/cloudapi/rg/resource_rg.go
Normal file
410
internal/service/cloudapi/rg/resource_rg.go
Normal file
@@ -0,0 +1,410 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
|
||||
"github.com/hashicorp/terraform-plugin-framework/path"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/flattens"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/schemas"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/utilities"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ resource.Resource = &resourceRG{}
|
||||
_ resource.ResourceWithImportState = &resourceRG{}
|
||||
)
|
||||
|
||||
// NewResourceRG is a helper function to simplify the provider implementation.
|
||||
func NewResourceRG() resource.Resource {
|
||||
return &resourceRG{}
|
||||
}
|
||||
|
||||
// resourceRG is the resource implementation.
|
||||
type resourceRG struct {
|
||||
client *decort.DecortClient
|
||||
}
|
||||
|
||||
// Create creates the resource and sets the initial Terraform state.
|
||||
func (r *resourceRG) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
|
||||
// Get plan to create resource group
|
||||
var plan models.ResourceRGModel
|
||||
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Create resourceRG: Error receiving the plan")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Create resourceRG: got plan successfully", map[string]any{"name": plan.Name.ValueString()})
|
||||
tflog.Info(ctx, "Create resourceRG: start creating", map[string]any{"name": plan.Name.ValueString()})
|
||||
|
||||
// Set timeouts
|
||||
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Create resourceRG: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Create resourceRG: set timeouts successfully", map[string]any{
|
||||
"name": plan.Name.ValueString(),
|
||||
"createTimeout": createTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, createTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Check if input values are valid in the platform
|
||||
tflog.Info(ctx, "Create resourceRG: starting input checks", map[string]any{"name": plan.Name.ValueString()})
|
||||
resp.Diagnostics.Append(resourceRgInputChecks(ctx, &plan, r.client)...)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "Create resourceRG: Error input checks")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Create resourceRG: input checks successful", map[string]any{"name": plan.Name.ValueString()})
|
||||
|
||||
// Make create request and get response
|
||||
createReq, diags := utilities.CreateRequestResourceRG(ctx, &plan)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "Create resourceRG: Error response for create request of resource rg")
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "Create resourceRG: before call CloudAPI().RG().Create", map[string]any{"req": createReq})
|
||||
rgId, err := r.client.CloudAPI().RG().Create(ctx, createReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError(
|
||||
"Create resourceRG: unable to Create RG",
|
||||
err.Error(),
|
||||
)
|
||||
return
|
||||
}
|
||||
plan.Id = types.StringValue(strconv.Itoa(int(rgId)))
|
||||
tflog.Info(ctx, "Create resourceRG: resource group created", map[string]any{"rgId": rgId, "name": plan.Name.ValueString()})
|
||||
|
||||
// additional settings after rg creation: in case of failures, warnings are added to resp.Diagnostics,
|
||||
// because additional settings failure is not critical. If errors were added instead of warnings, terraform
|
||||
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
|
||||
|
||||
// grant access to resource group if needed, warnings added to resp.Diagnostics in case of failure.
|
||||
resp.Diagnostics.Append(utilities.AccessCreateRG(ctx, rgId, &plan, r.client)...)
|
||||
|
||||
// set def_net for resource group if needed, warnings added to resp.Diagnostics in case of failure.
|
||||
resp.Diagnostics.Append(utilities.SetDefNetCreateRG(ctx, rgId, &plan, r.client)...)
|
||||
|
||||
// enable/disable of resource group after creation, warnings added to resp.Diagnostics in case of failure.
|
||||
resp.Diagnostics.Append(utilities.EnableDisableCreateRG(ctx, rgId, &plan, r.client)...)
|
||||
|
||||
tflog.Info(ctx, "Create resourceRG: resource creation is completed", map[string]any{"rg_id": rgId})
|
||||
|
||||
// Map response body to schema and populate Computed attribute values
|
||||
resp.Diagnostics.Append(flattens.RGResource(ctx, &plan, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
// Set data last update
|
||||
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
|
||||
|
||||
// Set state to fully populated data
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *resourceRG) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
|
||||
// Get current state
|
||||
var state models.ResourceRGModel
|
||||
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read resourceRG: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
// Set timeouts
|
||||
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read resourceRG: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Read resourceRG: set timeouts successfully", map[string]any{
|
||||
"rg_id": state.Id.ValueString(),
|
||||
"readTimeout": readTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, readTimeout)
|
||||
defer cancel()
|
||||
|
||||
// read status
|
||||
resp.Diagnostics.Append(utilities.RGReadStatus(ctx, &state, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read resourceRG: Error reading resource group status")
|
||||
return
|
||||
}
|
||||
|
||||
// Overwrite items with refreshed state
|
||||
resp.Diagnostics.Append(flattens.RGResource(ctx, &state, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read resourceRG: Error flatten resource group")
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Read resourceRG: Error set state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "End read resource group")
|
||||
}
|
||||
|
||||
// Update updates the resource and sets the updated Terraform state on success.
|
||||
func (r *resourceRG) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
|
||||
// Retrieve values from plan
|
||||
var plan models.ResourceRGModel
|
||||
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error receiving the plan")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Update resourceRG: got plan successfully", map[string]any{"rg_id": plan.Id.ValueString()})
|
||||
|
||||
// Retrieve values from state
|
||||
var state models.ResourceRGModel
|
||||
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error receiving the state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Update resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
// Set timeouts
|
||||
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout600s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Update resourceRG: set timeouts successfully", map[string]any{
|
||||
"rg_id": state.Id.ValueString(),
|
||||
"updateTimeout": updateTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Checking for values in the platform
|
||||
tflog.Info(ctx, "Update resourceRG: starting input checks", map[string]any{"rg_id": plan.Id.ValueString()})
|
||||
resp.Diagnostics.Append(resourceRgInputChecks(ctx, &plan, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error input checks")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Update resourceRG: input checks successful", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
rgId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
|
||||
if err != nil {
|
||||
diags.AddError("Cannot parse resource group ID from state", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Get current resource group values
|
||||
recordRG, err := utilities.RGCheckPresence(ctx, rgId, r.client)
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError(
|
||||
"Update resourceRG: unable to Update RG after input checks",
|
||||
err.Error(),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "Update resourceRG: check status for RG", map[string]any{"rg_id": recordRG.ID, "status": recordRG.Status})
|
||||
|
||||
// Validate if changes in plan are allowed
|
||||
tflog.Info(ctx, "Update resourceRG: checking def_net is not empty in case of change", map[string]any{
|
||||
"rg_id": state.Id.ValueString()})
|
||||
if !state.DefNet.IsNull() && plan.DefNet.IsNull() {
|
||||
resp.Diagnostics.AddError(
|
||||
"Update resourceRG: Invalid input provided",
|
||||
fmt.Sprintf("block def_net must not be empty for resource with rg_id %d", recordRG.ID),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "Update resourceRG: checking def_net_type, ipcidr, ext_ip are not changed", map[string]any{
|
||||
"rg_id": state.Id.ValueString(),
|
||||
"def_net_type_plan": plan.DefNetType.ValueString(),
|
||||
"def_net_type_state": state.DefNetType.ValueString(),
|
||||
"ipcidr_plan": plan.IPCIDR.ValueString(),
|
||||
"ipcidr_state": state.IPCIDR.ValueString(),
|
||||
"ext_ip_plan": plan.ExtIP.ValueString(),
|
||||
"ext_ip_state": state.ExtIP.ValueString(),
|
||||
})
|
||||
|
||||
if !plan.DefNetType.Equal(state.DefNetType) {
|
||||
resp.Diagnostics.AddWarning(
|
||||
"Update resourceRG: Invalid input provided. Warning can be ignored if resource was imported.",
|
||||
fmt.Sprintf("block def_net_type must not be changed for resource with rg_id %d", recordRG.ID),
|
||||
)
|
||||
}
|
||||
|
||||
if !plan.IPCIDR.Equal(state.IPCIDR) {
|
||||
resp.Diagnostics.AddError(
|
||||
"Update resourceRG: Invalid input provided",
|
||||
fmt.Sprintf("block ipcidr must not be changed for resource with rg_id %d", recordRG.ID),
|
||||
)
|
||||
return
|
||||
}
|
||||
if !plan.ExtIP.Equal(state.ExtIP) {
|
||||
resp.Diagnostics.AddError(
|
||||
"Update resourceRG: Invalid input provided",
|
||||
fmt.Sprintf("block ext_ip must not be changed for resource with rg_id %d", recordRG.ID),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// update RG if any of the fields name, description, register_computes or quota has been changed
|
||||
resp.Diagnostics.Append(utilities.UpdateRG(ctx, rgId, &plan, &state, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error updating rg")
|
||||
return
|
||||
}
|
||||
|
||||
// enable or disable RG
|
||||
if !plan.Enable.Equal(state.Enable) {
|
||||
resp.Diagnostics.Append(utilities.EnableDisableUpdateRG(ctx, rgId, &plan, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error enable/disable rg")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// grant/revoke access for RG
|
||||
if !reflect.DeepEqual(plan.Access, state.Access) {
|
||||
resp.Diagnostics.Append(utilities.AccessUpdateRG(ctx, rgId, &plan, &state, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Error grant/revoke access for rg")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// set new def_net is needed
|
||||
if !reflect.DeepEqual(plan.DefNet, state.DefNet) {
|
||||
resp.Diagnostics.Append(utilities.SetDefNetUpdateRG(ctx, rgId, &plan, &state, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Update resourceRG: Unable to setDefNet for RG")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "Update resourceRG: resource update is completed", map[string]any{"rg_id": plan.Id.ValueString()})
|
||||
|
||||
// Map response body to schema and populate Computed attribute values
|
||||
resp.Diagnostics.Append(flattens.RGResource(ctx, &plan, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
// Set data last update
|
||||
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
|
||||
|
||||
// Set state to fully populated data
|
||||
diags = resp.State.Set(ctx, plan)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Delete deletes the resource and removes the Terraform state on success.
|
||||
func (r *resourceRG) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
|
||||
// Get current state
|
||||
var state models.ResourceRGModel
|
||||
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Delete resourceRG: Error get state")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Delete resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
// Set timeouts
|
||||
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
tflog.Error(ctx, "Delete resourceRG: Error set timeout")
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "Delete resourceRG: set timeouts successfully", map[string]any{
|
||||
"rg_id": state.Id.ValueString(),
|
||||
"deleteTimeout": deleteTimeout})
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Delete existing resource group
|
||||
delReq := rg.DeleteRequest{
|
||||
RGID: uint64(state.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if state.Force.IsNull() {
|
||||
delReq.Force = true // default value
|
||||
} else {
|
||||
delReq.Force = state.Force.ValueBool()
|
||||
}
|
||||
if state.Permanently.IsNull() {
|
||||
delReq.Permanently = true // default value
|
||||
} else {
|
||||
delReq.Permanently = state.Permanently.ValueBool()
|
||||
}
|
||||
if !state.Reason.IsNull() {
|
||||
delReq.Reason = state.Reason.ValueString()
|
||||
}
|
||||
_, err := r.client.CloudAPI().RG().Delete(ctx, delReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("Delete resourceRG: Error deleting resource group with error: ", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "End delete resource group", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
}
|
||||
|
||||
// Schema defines the schema for the resource.
|
||||
func (r *resourceRG) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: schemas.MakeSchemaResourceRG(),
|
||||
Blocks: map[string]schema.Block{
|
||||
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Metadata returns the resource type name.
|
||||
func (r *resourceRG) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_resgroup"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the resource.
func (r *resourceRG) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceRG")
	// client.Resource extracts the provider-configured *decort.DecortClient;
	// NOTE(review): presumably it records a diagnostic in resp on failure —
	// confirm in the internal/client package.
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceRG successfully")
}
|
||||
|
||||
// ImportState imports an existing resource group into Terraform state,
// passing the user-supplied import ID straight through to the "id" attribute.
func (r *resourceRG) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
|
||||
164
internal/service/cloudapi/rg/schemas/schema_data_source_rg.go
Normal file
164
internal/service/cloudapi/rg/schemas/schema_data_source_rg.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRG returns the attribute map for the single resource
// group data source: rg_id is the required lookup key, reason is an optional
// request annotation, and every other attribute is computed from the platform
// response.
func MakeSchemaDataSourceRG() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required:    true,
			Description: "resource group id",
		},

		// optional attributes
		"reason": schema.StringAttribute{
			Optional:    true,
			Description: "reason for request",
		},

		//computed attributes
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"account_name": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"explicit": schema.BoolAttribute{
						Computed: true,
					},
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"right": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"user_group_id": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"compute_features": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		"cpu_allocation_parameter": schema.StringAttribute{
			Computed: true,
		},
		"cpu_allocation_ratio": schema.Float64Attribute{
			Computed: true,
		},
		"created_by": schema.StringAttribute{
			Computed: true,
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"def_net_id": schema.Int64Attribute{
			Computed: true,
		},
		"def_net_type": schema.StringAttribute{
			Computed: true,
		},
		"deleted_by": schema.StringAttribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"desc": schema.StringAttribute{
			Computed: true,
		},
		"dirty": schema.BoolAttribute{
			Computed: true,
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"guid": schema.Int64Attribute{
			Computed: true,
		},
		"id": schema.StringAttribute{
			Computed: true,
		},
		"lock_status": schema.StringAttribute{
			Computed: true,
		},
		"milestones": schema.Int64Attribute{
			Computed: true,
		},
		"name": schema.StringAttribute{
			Computed: true,
		},
		"register_computes": schema.BoolAttribute{
			Computed: true,
		},
		"resource_limits": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"cu_c": schema.Float64Attribute{
					Computed: true,
				},
				"cu_d": schema.Float64Attribute{
					Computed: true,
				},
				"cu_dm": schema.Float64Attribute{
					Computed: true,
				},
				"cu_i": schema.Float64Attribute{
					Computed: true,
				},
				"cu_m": schema.Float64Attribute{
					Computed: true,
				},
				"cu_np": schema.Float64Attribute{
					Computed: true,
				},
				"gpu_units": schema.Float64Attribute{
					Computed: true,
				},
			},
		},
		"secret": schema.StringAttribute{
			Computed: true,
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"updated_by": schema.StringAttribute{
			Computed: true,
		},
		"updated_time": schema.Int64Attribute{
			Computed: true,
		},
		"vins": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"computes": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"res_types": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		"uniq_pools": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
	}
}
|
||||
@@ -0,0 +1,59 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGAffinityGroupComputes returns the attribute map for
// the data source listing computes in one affinity group of a resource group.
// rg_id and affinity_group are the required lookup keys; "items" holds, per
// compute, the related node/compute id lists returned by the platform.
func MakeSchemaDataSourceRGAffinityGroupComputes() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required:    true,
			Description: "find by rg id",
		},
		"affinity_group": schema.StringAttribute{
			Required:    true,
			Description: "Affinity group label",
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.Int64Attribute{
						Computed: true,
					},
					"other_node": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"other_node_indirect": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"other_node_indirect_soft": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"other_node_soft": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"same_node": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"same_node_soft": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
				},
			},
		},
	}
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGAffinityGroupsGet returns the attribute map for the
// data source resolving one affinity group (by rg_id + label) to the list of
// compute ids it contains.
func MakeSchemaDataSourceRGAffinityGroupsGet() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required:    true,
			Description: "find by rg id",
		},
		"affinity_group": schema.StringAttribute{
			Required:    true,
			Description: "Affinity group label",
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"ids": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
	}
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGAffinityGroupsList returns the attribute map for the
// paginated data source listing all affinity groups of a resource group: each
// entry pairs a group label with its (compute id, node id) members.
func MakeSchemaDataSourceRGAffinityGroupsList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required:    true,
			Description: "find by rg id",
		},

		// optional attributes
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "size number",
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"affinity_groups": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"label": schema.StringAttribute{
						Computed: true,
					},
					"ids": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"id": schema.Int64Attribute{
									Computed: true,
								},
								"node_id": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
|
||||
@@ -0,0 +1,41 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGAudits returns the attribute map for the data source
// listing audit records of a resource group: per record, the API call name,
// response time, HTTP status code, timestamp and acting user.
func MakeSchemaDataSourceRGAudits() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required: true,
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"call": schema.StringAttribute{
						Computed: true,
					},
					"responsetime": schema.Float64Attribute{
						Computed: true,
					},
					"statuscode": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Float64Attribute{
						Computed: true,
					},
					"user": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
	}
}
|
||||
@@ -0,0 +1,136 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGGetResourceConsumption returns the attribute map for
// the resource-consumption data source of a resource group: "consumed" and
// "reserved" mirror each other (cpu/ram/disk/network plus a per-SEP
// breakdown), and "resource_limits" carries the configured quota values.
func MakeSchemaDataSourceRGGetResourceConsumption() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"rg_id": schema.Int64Attribute{
			Required:    true,
			Description: "find by rg id",
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"consumed": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"cpu": schema.Int64Attribute{
					Computed: true,
				},
				"disk_size": schema.Float64Attribute{
					Computed: true,
				},
				"disk_size_max": schema.Float64Attribute{
					Computed: true,
				},
				"extips": schema.Int64Attribute{
					Computed: true,
				},
				"exttraffic": schema.Int64Attribute{
					Computed: true,
				},
				"gpu": schema.Int64Attribute{
					Computed: true,
				},
				"ram": schema.Int64Attribute{
					Computed: true,
				},
				"seps": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"sep_id": schema.StringAttribute{
								Computed: true,
							},
							"data_name": schema.StringAttribute{
								Computed: true,
							},
							"disk_size": schema.Float64Attribute{
								Computed: true,
							},
							"disk_size_max": schema.Float64Attribute{
								Computed: true,
							},
						},
					},
				},
			},
		},
		"reserved": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"cpu": schema.Int64Attribute{
					Computed: true,
				},
				"disk_size": schema.Float64Attribute{
					Computed: true,
				},
				"disk_size_max": schema.Float64Attribute{
					Computed: true,
				},
				"extips": schema.Int64Attribute{
					Computed: true,
				},
				"exttraffic": schema.Int64Attribute{
					Computed: true,
				},
				"gpu": schema.Int64Attribute{
					Computed: true,
				},
				"ram": schema.Int64Attribute{
					Computed: true,
				},
				"seps": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"sep_id": schema.StringAttribute{
								Computed: true,
							},
							"data_name": schema.StringAttribute{
								Computed: true,
							},
							"disk_size": schema.Float64Attribute{
								Computed: true,
							},
							"disk_size_max": schema.Float64Attribute{
								Computed: true,
							},
						},
					},
				},
			},
		},
		"resource_limits": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"cu_c": schema.Float64Attribute{
					Computed: true,
				},
				"cu_d": schema.Float64Attribute{
					Computed: true,
				},
				"cu_dm": schema.Float64Attribute{
					Computed: true,
				},
				"cu_i": schema.Float64Attribute{
					Computed: true,
				},
				"cu_m": schema.Float64Attribute{
					Computed: true,
				},
				"cu_np": schema.Float64Attribute{
					Computed: true,
				},
				"gpu_units": schema.Float64Attribute{
					Computed: true,
				},
			},
		},
	}
}
|
||||
@@ -0,0 +1,215 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
// MakeSchemaDataSourceRGList returns the attribute map for the paginated
// resource-group list data source. All filters (by_id, name, account,
// creation time window, status, lock status, includedeleted, sort_by,
// page/size) are optional; "items" holds the full computed record for each
// matching resource group and "entry_count" the total match count.
func MakeSchemaDataSourceRGList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by id",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "find by name",
		},
		"account_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by account id",
		},
		"account_name": schema.StringAttribute{
			Optional:    true,
			Description: "find by account name",
		},
		"created_after": schema.Int64Attribute{
			Optional:    true,
			Description: "find by created after time (unix timestamp)",
		},
		"created_before": schema.Int64Attribute{
			Optional:    true,
			Description: "find by created before time (unix timestamp)",
		},
		"status": schema.StringAttribute{
			Optional:    true,
			Description: "find by status",
		},
		"lock_status": schema.StringAttribute{
			Optional:    true,
			Description: "find by lock status",
		},
		"includedeleted": schema.BoolAttribute{
			Optional:    true,
			Description: "included deleted resource groups. If using field 'status', then includedeleted will be ignored",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "size number",
		},

		//computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"account_acl": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"explicit": schema.BoolAttribute{
									Computed: true,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"right": schema.StringAttribute{
									Computed: true,
								},
								"status": schema.StringAttribute{
									Computed: true,
								},
								"type": schema.StringAttribute{
									Computed: true,
								},
								"user_group_id": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"compute_features": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"created_by": schema.StringAttribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"def_net_id": schema.Int64Attribute{
						Computed: true,
					},
					"def_net_type": schema.StringAttribute{
						Computed: true,
					},
					"deleted_by": schema.StringAttribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"dirty": schema.BoolAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"lock_status": schema.StringAttribute{
						Computed: true,
					},
					"milestones": schema.Int64Attribute{
						Computed: true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"register_computes": schema.BoolAttribute{
						Computed: true,
					},
					"resource_limits": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"cu_c": schema.Float64Attribute{
								Computed: true,
							},
							"cu_d": schema.Float64Attribute{
								Computed: true,
							},
							"cu_dm": schema.Float64Attribute{
								Computed: true,
							},
							"cu_i": schema.Float64Attribute{
								Computed: true,
							},
							"cu_m": schema.Float64Attribute{
								Computed: true,
							},
							"cu_np": schema.Float64Attribute{
								Computed: true,
							},
							"gpu_units": schema.Float64Attribute{
								Computed: true,
							},
						},
					},
					"secret": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"updated_by": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
					"vins": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"vms": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"resource_types": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"cpu_allocation_parameter": schema.StringAttribute{
						Computed: true,
					},
					"cpu_allocation_ratio": schema.Float64Attribute{
						Computed: true,
					},
					"uniq_pools": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
|
||||
@@ -0,0 +1,189 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListComputes() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"compute_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by id",
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by name",
|
||||
},
|
||||
"account_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by account id",
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by tech status",
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by status",
|
||||
},
|
||||
"ip_address": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by ip address",
|
||||
},
|
||||
"extnet_name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by external network name",
|
||||
},
|
||||
"extnet_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by external network id",
|
||||
},
|
||||
"sort_by": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "page number",
|
||||
},
|
||||
"size": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "size number",
|
||||
},
|
||||
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"account_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_label": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_rules": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"key": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"mode": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"policy": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"topology": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"value": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"affinity_weight": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"antiaffinity_rules": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"key": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"mode": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"policy": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"topology": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"value": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cpus": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"registered": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"total_disks_size": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"user_managed": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_connected": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,207 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListDeleted() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// optional attributes
|
||||
"by_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by id",
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by name",
|
||||
},
|
||||
"account_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by account id",
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by account name",
|
||||
},
|
||||
"created_after": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by created after time (unix timestamp)",
|
||||
},
|
||||
"created_before": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by created before time (unix timestamp)",
|
||||
},
|
||||
"lock_status": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by lock status",
|
||||
},
|
||||
"sort_by": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "page number",
|
||||
},
|
||||
"size": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "size number",
|
||||
},
|
||||
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"account_acl": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"explicit": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"right": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"type": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"user_group_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"account_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"compute_features": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
"created_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_type": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"desc": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"dirty": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"lock_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"register_computes": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"resource_limits": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cu_c": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_dm": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"secret": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.Int64Type,
|
||||
},
|
||||
"vms": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.Int64Type,
|
||||
},
|
||||
"resource_types": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
"cpu_allocation_parameter": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cpu_allocation_ratio": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"uniq_pools": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,343 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListLB() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"by_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by id",
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by name",
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by tech status",
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by status",
|
||||
},
|
||||
"front_ip": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by frontend Ip",
|
||||
},
|
||||
"back_ip": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by backend Ip",
|
||||
},
|
||||
"sort_by": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "page number",
|
||||
},
|
||||
"size": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "size number",
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"ha_mode": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
// acl is not added because platform returns interface{}
|
||||
//"acl": schema.ListNestedAttribute{
|
||||
// Computed: true,
|
||||
// NestedObject: schema.NestedAttributeObject{
|
||||
// Attributes: map[string]schema.Attribute{
|
||||
// "explicit": schema.BoolAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// "guid": schema.StringAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// "right": schema.StringAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// "status": schema.StringAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// "type": schema.StringAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// "user_group_id": schema.StringAttribute{
|
||||
// Computed: true,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
//},
|
||||
"backend_haip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"backends": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"algorithm": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"server_default_settings": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"inter": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"down_inter": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rise": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"fall": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"slow_start": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"max_conn": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"max_queue": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"weight": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"servers": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"address": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"check": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"port": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"server_settings": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"inter": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"down_inter": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rise": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"fall": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"slow_start": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"max_conn": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"max_queue": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"weight": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"created_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"desc": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"dp_api_user": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extnet_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"frontend_haip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"frontends": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"backend": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"bindings": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"address": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"port": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"gid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"image_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"primary_node": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"backend_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"compute_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"frontend_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"mgmt_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"network_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"rg_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"secondary_node": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"backend_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"compute_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"frontend_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"mgmt_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"network_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListPFW() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"public_port_end": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"public_port_start": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vm_port": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,114 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGListVins() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"name": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by name",
|
||||
},
|
||||
"account_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by account id",
|
||||
},
|
||||
"ext_ip": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "find by external ip address",
|
||||
},
|
||||
"vins_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "find by vins id",
|
||||
},
|
||||
"sort_by": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "page number",
|
||||
},
|
||||
"size": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "size number",
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Optional: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"account_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"computes": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"external_ip": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extnet_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"free_ips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"network": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"pri_vnf_dev_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,143 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGResourceConsumptionList() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"items": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"consumed": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"reserved": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"resource_limits": schema.SingleNestedAttribute{
|
||||
Computed: true,
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cu_c": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_dm": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
)
|
||||
|
||||
func MakeSchemaDataSourceRGUsage() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "find by rg id",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"reason": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "reason for action",
|
||||
},
|
||||
|
||||
//computed attributes
|
||||
"id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"extips": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"seps": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"sep_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
275
internal/service/cloudapi/rg/schemas/schema_resource_rg.go
Normal file
275
internal/service/cloudapi/rg/schemas/schema_resource_rg.go
Normal file
@@ -0,0 +1,275 @@
|
||||
package schemas
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
)
|
||||
|
||||
func MakeSchemaResourceRG() map[string]schema.Attribute {
|
||||
return map[string]schema.Attribute{
|
||||
// required attributes
|
||||
"account_id": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "account, which will own this resource group",
|
||||
},
|
||||
"gid": schema.Int64Attribute{
|
||||
Required: true,
|
||||
Description: "grid id",
|
||||
},
|
||||
"name": schema.StringAttribute{
|
||||
Required: true,
|
||||
Description: "name of this RG. Must be unique within the account.",
|
||||
},
|
||||
|
||||
// optional attributes
|
||||
"def_net_type": schema.StringAttribute{
|
||||
// attribute is only Optional (not Computed) on purpose. If Computed is added, errors occur during Create and
|
||||
// Update of the resource in case "def_net"."net_type" is different from "def_net_type". Terraform framework
|
||||
// produces "Error: Provider produced inconsistent results after apply" if plan values and resulting values
|
||||
// are different, and this is exactly what happens if Computed and Optional field can be updated indirectly
|
||||
// (via "def_net","net_type" in our case).
|
||||
Optional: true,
|
||||
Description: "type of the default network for this RG. VMs created in this RG will be by default connected to this network. Allowed values are PRIVATE, PUBLIC, NONE.",
|
||||
Validators: []validator.String{
|
||||
stringvalidator.OneOf("PRIVATE", "PUBLIC", "NONE"), // case is not ignored
|
||||
},
|
||||
// default value is "PRIVATE".
|
||||
},
|
||||
"ipcidr": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "private network IP CIDR if default network PRIVATE",
|
||||
},
|
||||
"ext_net_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "external network id",
|
||||
// default value is 0.
|
||||
},
|
||||
"ext_ip": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "IP address on the external network to request when def_net_type=PRIVATE and ext_net_id is not 0.",
|
||||
},
|
||||
"owner": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "username - owner of this RG. Leave blank to set current user as owner",
|
||||
},
|
||||
"quota": schema.SingleNestedAttribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Quota settings for this resource group.",
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"cpu": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Limit on the total number of CPUs in this resource group.",
|
||||
},
|
||||
"ram": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Limit on the total amount of RAM in this resource group, specified in MB.",
|
||||
},
|
||||
"disk": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Limit on the total volume of storage resources in this resource group, specified in GB.",
|
||||
},
|
||||
"ext_traffic": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Limit on the total ingress network traffic for this resource group, specified in GB.",
|
||||
},
|
||||
"ext_ips": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "Limit on the total number of external IP addresses this resource group can use.",
|
||||
},
|
||||
"gpu_units": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
Description: "Limit on the total number of virtual GPUs this resource group can use.",
|
||||
},
|
||||
"cu_d": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
Description: "Limit on the total volume of storage resources in this resource group, specified in GB.",
|
||||
},
|
||||
},
|
||||
},
|
||||
"access": schema.ListNestedAttribute{
|
||||
Optional: true,
|
||||
Description: "Grant/revoke user or group access to the Resource group as specified",
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"user": schema.StringAttribute{
|
||||
Required: true,
|
||||
Description: "User or group name to grant access",
|
||||
},
|
||||
"right": schema.StringAttribute{
|
||||
Required: true,
|
||||
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
|
||||
},
|
||||
"reason": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "Reason for action",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"def_net": schema.SingleNestedAttribute{
|
||||
Optional: true,
|
||||
Description: "Set default network for attach associated VMs",
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"net_type": schema.StringAttribute{
|
||||
Required: true,
|
||||
Validators: []validator.String{
|
||||
stringvalidator.OneOf("PRIVATE", "PUBLIC"), // case is not ignored
|
||||
},
|
||||
Description: "Network type to set. Must be on of 'PRIVATE' or 'PUBLIC'.",
|
||||
},
|
||||
"net_id": schema.Int64Attribute{
|
||||
Optional: true,
|
||||
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
|
||||
// default value is 0
|
||||
},
|
||||
"reason": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "Reason for action",
|
||||
},
|
||||
},
|
||||
},
|
||||
"description": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "User-defined text description of this resource group.",
|
||||
},
|
||||
"force": schema.BoolAttribute{
|
||||
Optional: true,
|
||||
Description: "Set to True if you want force delete non-empty RG",
|
||||
// default value is true
|
||||
},
|
||||
"permanently": schema.BoolAttribute{
|
||||
Optional: true,
|
||||
Description: "Set to True if you want force delete non-empty RG",
|
||||
// default value is true
|
||||
},
|
||||
"reason": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: "Set to True if you want force delete non-empty RG",
|
||||
},
|
||||
"register_computes": schema.BoolAttribute{
|
||||
Optional: true,
|
||||
Description: "Register computes in registration system",
|
||||
// default value is false
|
||||
},
|
||||
"restore": schema.BoolAttribute{
|
||||
Optional: true,
|
||||
// default value is true
|
||||
},
|
||||
"enable": schema.BoolAttribute{
|
||||
Optional: true,
|
||||
Description: "flag for enable/disable RG",
|
||||
// default value is true
|
||||
},
|
||||
|
||||
// computed attributes
|
||||
"rg_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"last_updated": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"acl": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"explicit": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"right": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"type": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"user_group_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"compute_features": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
"cpu_allocation_parameter": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"cpu_allocation_ratio": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_id": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"dirty": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"guid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
},
|
||||
"lock_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"secret": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vins": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.Int64Type,
|
||||
},
|
||||
"vms": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.Int64Type,
|
||||
},
|
||||
"res_types": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
"uniq_pools": schema.ListAttribute{
|
||||
Computed: true,
|
||||
ElementType: types.StringType,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) (*rg.ListAffinityGroupsComputes, error) {
|
||||
agCompsReq := rg.AffinityGroupComputesRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
AffinityGroup: plan.AffinityGroup.ValueString(),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: before call CloudAPI().RG().AffinityGroupComputes", map[string]any{"req": agCompsReq})
|
||||
agCompsList, err := c.CloudAPI().RG().AffinityGroupComputes(ctx, agCompsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity group computes with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: response from CloudAPI().RG().AffinityGroupComputes", map[string]any{"response": agCompsList})
|
||||
|
||||
return &agCompsList, err
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupsGetCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) ([]uint64, error) {
|
||||
agReq := rg.AffinityGroupsGetRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
AffinityGroup: plan.AffinityGroup.ValueString(),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: before call CloudAPI().RG().AffinityGroupsGet", map[string]any{"req": agReq})
|
||||
agItem, err := c.CloudAPI().RG().AffinityGroupsGet(ctx, agReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity groups get with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: response from CloudAPI().RG().AffinityGroupsGet", map[string]any{"response": agItem})
|
||||
|
||||
return agItem, err
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
)
|
||||
|
||||
func RGAffinityGroupsListCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) (*rg.ListAffinityGroups, error) {
|
||||
agListReq := rg.AffinityGroupsListRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
if !plan.Page.IsNull() {
|
||||
agListReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
agListReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: before call CloudAPI().RG().AffinityGroupsList", map[string]any{"req": agListReq})
|
||||
agList, err := c.CloudAPI().RG().AffinityGroupsList(ctx, agListReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group affinity groups list with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: response from CloudAPI().RG().AffinityGroupsList", map[string]any{"response": agList})
|
||||
|
||||
return agList, err
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGAuditsCheckPresence(ctx context.Context, plan *models.DataSourceRGAuditsModel, c *decort.DecortClient) (*rg.ListAudits, error) {
|
||||
auditsReq := rg.AuditsRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAuditsCheckPresence: before call CloudAPI().RG().Audits", map[string]any{"response": auditsReq})
|
||||
rgAudtis, err := c.CloudAPI().RG().Audits(ctx, auditsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group audits with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGAuditsCheckPresence: response from CloudAPI().RG().Audits", map[string]any{"response": rgAudtis})
|
||||
|
||||
return &rgAudtis, err
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGGetResourceConsumptionCheckPresence(ctx context.Context, plan *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) (*rg.ItemResourceConsumption, error) {
|
||||
resConsReq := rg.GetResourceConsumptionRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: before call CloudAPI().RG().GetResourceConsumption", map[string]any{"response": resConsReq})
|
||||
resCons, err := c.CloudAPI().RG().GetResourceConsumption(ctx, resConsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group get resource consumption with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: response from CloudAPI().RG().GetResourceConsumption", map[string]any{"response": resCons})
|
||||
|
||||
return resCons, err
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListCheckPresence(ctx context.Context, plan *models.DataSourceRGListModel, c *decort.DecortClient) (*rg.ListResourceGroups, error) {
|
||||
listReq := rg.ListRequest{}
|
||||
|
||||
if !plan.ById.IsNull() {
|
||||
listReq.ByID = uint64(plan.ById.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountId.IsNull() {
|
||||
listReq.AccountID = uint64(plan.AccountId.ValueInt64())
|
||||
}
|
||||
if !plan.AccountName.IsNull() {
|
||||
listReq.AccountName = plan.AccountName.ValueString()
|
||||
}
|
||||
if !plan.CreatedAfter.IsNull() {
|
||||
listReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64())
|
||||
}
|
||||
if !plan.CreatedBefore.IsNull() {
|
||||
listReq.CreatedBefore = uint64(plan.CreatedBefore.ValueInt64())
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.LockStatus.IsNull() {
|
||||
listReq.LockStatus = plan.LockStatus.ValueString()
|
||||
}
|
||||
if !plan.IncludeDeleted.IsNull() {
|
||||
listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool()
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListCheckPresence: before call CloudAPI().RG().List", map[string]any{"response": listReq})
|
||||
rgList, err := c.CloudAPI().RG().List(ctx, listReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListCheckPresence: response from CloudAPI().RG().List", map[string]any{"response": rgList})
|
||||
|
||||
return rgList, err
|
||||
}
|
||||
@@ -0,0 +1,61 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGListComputesModel, c *decort.DecortClient) (*rg.ListComputes, error) {
|
||||
listCompReq := rg.ListComputesRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.ComputeID.IsNull() {
|
||||
listCompReq.ComputeID = uint64(plan.ComputeID.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listCompReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountID.IsNull() {
|
||||
listCompReq.AccountID = uint64(plan.AccountID.ValueInt64())
|
||||
}
|
||||
if !plan.TechStatus.IsNull() {
|
||||
listCompReq.TechStatus = plan.TechStatus.ValueString()
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listCompReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.IPAddress.IsNull() {
|
||||
listCompReq.IPAddress = plan.IPAddress.ValueString()
|
||||
}
|
||||
if !plan.ExtNetName.IsNull() {
|
||||
listCompReq.ExtNetName = plan.ExtNetName.ValueString()
|
||||
}
|
||||
if !plan.ExtNetID.IsNull() {
|
||||
listCompReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listCompReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listCompReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listCompReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListComputesCheckPresence: before call CloudAPI().RG().ListComputes", map[string]any{"response": listCompReq})
|
||||
rgListComp, err := c.CloudAPI().RG().ListComputes(ctx, listCompReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list computes with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListComputesCheckPresence: response from CloudAPI().RG().ListComputes", map[string]any{"response": rgListComp})
|
||||
|
||||
return rgListComp, err
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceRGListDeletedModel, c *decort.DecortClient) (*rg.ListResourceGroups, error) {
|
||||
listDelReq := rg.ListDeletedRequest{}
|
||||
|
||||
if !plan.ById.IsNull() {
|
||||
listDelReq.ByID = uint64(plan.ById.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listDelReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountId.IsNull() {
|
||||
listDelReq.AccountID = uint64(plan.AccountId.ValueInt64())
|
||||
}
|
||||
if !plan.AccountName.IsNull() {
|
||||
listDelReq.AccountName = plan.AccountName.ValueString()
|
||||
}
|
||||
if !plan.CreatedAfter.IsNull() {
|
||||
listDelReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64())
|
||||
}
|
||||
if !plan.CreatedBefore.IsNull() {
|
||||
listDelReq.CreatedBefore = uint64(plan.CreatedBefore.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listDelReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.LockStatus.IsNull() {
|
||||
listDelReq.LockStatus = plan.LockStatus.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listDelReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listDelReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListDeletedCheckPresence: before call CloudAPI().RG().ListDeleted", map[string]any{"response": listDelReq})
|
||||
rgListDel, err := c.CloudAPI().RG().ListDeleted(ctx, listDelReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list deleted with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListDeletedCheckPresence: response from CloudAPI().RG().ListDeleted", map[string]any{"response": rgListDel})
|
||||
|
||||
return rgListDel, err
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListLBCheckPresence(ctx context.Context, plan *models.DataSourceRGListLBModel, c *decort.DecortClient) (*rg.ListLB, error) {
|
||||
listLBReq := rg.ListLBRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.ByID.IsNull() {
|
||||
listLBReq.ByID = uint64(plan.ByID.ValueInt64())
|
||||
}
|
||||
if !plan.Name.IsNull() {
|
||||
listLBReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.TechStatus.IsNull() {
|
||||
listLBReq.TechStatus = plan.TechStatus.ValueString()
|
||||
}
|
||||
if !plan.Status.IsNull() {
|
||||
listLBReq.Status = plan.Status.ValueString()
|
||||
}
|
||||
if !plan.FrontIP.IsNull() {
|
||||
listLBReq.FrontIP = plan.FrontIP.ValueString()
|
||||
}
|
||||
if !plan.BackIP.IsNull() {
|
||||
listLBReq.BackIP = plan.BackIP.ValueString()
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listLBReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listLBReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listLBReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListLBCheckPresence: before call CloudAPI().RG().ListLB", map[string]any{"response": listLBReq})
|
||||
rgListLB, err := c.CloudAPI().RG().ListLB(ctx, listLBReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list lb with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListLBCheckPresence: response from CloudAPI().RG().ListLB", map[string]any{"response": rgListLB})
|
||||
|
||||
return rgListLB, err
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListPFWCheckPresence(ctx context.Context, plan *models.DataSourceRGListPFWModel, c *decort.DecortClient) (*rg.ListPortForwards, error) {
|
||||
listPFWReq := rg.ListPFWRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListPFWCheckPresence: before call CloudAPI().RG().ListPFW", map[string]any{"response": listPFWReq})
|
||||
rgListPFW, err := c.CloudAPI().RG().ListPFW(ctx, listPFWReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list pfw with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListPFWCheckPresence: response from CloudAPI().RG().ListPFW", map[string]any{"response": rgListPFW})
|
||||
|
||||
return rgListPFW, err
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGListVinsCheckPresence(ctx context.Context, plan *models.DataSourceRGListVinsModel, c *decort.DecortClient) (*rg.ListVINS, error) {
|
||||
listVinsReq := rg.ListVINSRequest{
|
||||
RGID: uint64(plan.RGID.ValueInt64()),
|
||||
}
|
||||
|
||||
if !plan.Name.IsNull() {
|
||||
listVinsReq.Name = plan.Name.ValueString()
|
||||
}
|
||||
if !plan.AccountID.IsNull() {
|
||||
listVinsReq.AccountID = uint64(plan.AccountID.ValueInt64())
|
||||
}
|
||||
if !plan.ExtIP.IsNull() {
|
||||
listVinsReq.ExtIP = plan.ExtIP.ValueString()
|
||||
}
|
||||
if !plan.VINSID.IsNull() {
|
||||
listVinsReq.VINSID = uint64(plan.VINSID.ValueInt64())
|
||||
}
|
||||
if !plan.SortBy.IsNull() {
|
||||
listVinsReq.SortBy = plan.SortBy.ValueString()
|
||||
}
|
||||
if !plan.Page.IsNull() {
|
||||
listVinsReq.Page = uint64(plan.Page.ValueInt64())
|
||||
}
|
||||
if !plan.Size.IsNull() {
|
||||
listVinsReq.Size = uint64(plan.Size.ValueInt64())
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListVinsCheckPresence: before call CloudAPI().RG().ListVINS", map[string]any{"response": listVinsReq})
|
||||
rgListVins, err := c.CloudAPI().RG().ListVINS(ctx, listVinsReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group list vins with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGListVinsCheckPresence: response from CloudAPI().RG().ListVINS", map[string]any{"response": rgListVins})
|
||||
|
||||
return rgListVins, err
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
)
|
||||
|
||||
func RGResourceConsumptionListCheckPresence(ctx context.Context, c *decort.DecortClient) (*rg.ListResourceConsumption, error) {
|
||||
tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: before call CloudAPI().RG().ListResourceConsumption")
|
||||
resConsList, err := c.CloudAPI().RG().ListResourceConsumption(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group resource consumption list with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: response from CloudAPI().RG().ListResourceConsumption", map[string]any{"response": resConsList})
|
||||
|
||||
return resConsList, err
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
)
|
||||
|
||||
func RGUsageCheckPresence(ctx context.Context, plan *models.DataSourceRGUsageModel, c *decort.DecortClient) (*rg.RecordResourceUsage, error) {
|
||||
usageReq := rg.UsageRequest{RGID: uint64(plan.RGID.ValueInt64())}
|
||||
|
||||
if !plan.Reason.IsNull() {
|
||||
usageReq.Reason = plan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGUsageCheckPresence: before call CloudAPI().RG().Usage", map[string]any{"response": usageReq})
|
||||
usage, err := c.CloudAPI().RG().Usage(ctx, usageReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group resource usage with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "RGUsageCheckPresence: response from CloudAPI().RG().Usage", map[string]any{"response": usage})
|
||||
|
||||
return usage, err
|
||||
}
|
||||
731
internal/service/cloudapi/rg/utilities/utility_resource_rg.go
Normal file
731
internal/service/cloudapi/rg/utilities/utility_resource_rg.go
Normal file
@@ -0,0 +1,731 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
|
||||
)
|
||||
|
||||
func CreateRequestResourceRG(ctx context.Context, plan *models.ResourceRGModel) (rg.CreateRequest, diag.Diagnostics) {
|
||||
tflog.Info(ctx, "Start CreateRequestResourceRG", map[string]any{
|
||||
"name": plan.Name.ValueString(),
|
||||
"account_id": plan.AccountID.ValueInt64(),
|
||||
"gid": plan.GID.ValueInt64(),
|
||||
})
|
||||
|
||||
// set up required parameters in resource group create request
|
||||
createReq := rg.CreateRequest{
|
||||
Name: plan.Name.ValueString(),
|
||||
AccountID: uint64(plan.AccountID.ValueInt64()),
|
||||
GID: uint64(plan.GID.ValueInt64()),
|
||||
}
|
||||
|
||||
// set up quota optional parameters
|
||||
if !plan.Quota.IsUnknown() {
|
||||
var quota models.QuotaModel
|
||||
diags := plan.Quota.As(ctx, "a, basetypes.ObjectAsOptions{})
|
||||
if diags.HasError() {
|
||||
return createReq, diags
|
||||
}
|
||||
|
||||
if quota.CPU.IsUnknown() {
|
||||
createReq.MaxCPUCapacity = -1 // default value -1
|
||||
} else {
|
||||
createReq.MaxCPUCapacity = quota.CPU.ValueInt64()
|
||||
}
|
||||
if quota.Disk.IsUnknown() {
|
||||
createReq.MaxVDiskCapacity = -1 // default value -1
|
||||
} else {
|
||||
createReq.MaxVDiskCapacity = quota.Disk.ValueInt64()
|
||||
}
|
||||
if quota.Ram.IsUnknown() {
|
||||
createReq.MaxMemoryCapacity = -1 // default value -1
|
||||
} else {
|
||||
createReq.MaxMemoryCapacity = quota.Ram.ValueInt64()
|
||||
}
|
||||
if quota.ExtTraffic.IsUnknown() {
|
||||
createReq.MaxNetworkPeerTransfer = -1 // default value -1
|
||||
} else {
|
||||
createReq.MaxNetworkPeerTransfer = quota.ExtTraffic.ValueInt64()
|
||||
}
|
||||
if quota.ExtIps.IsUnknown() {
|
||||
createReq.MaxNumPublicIP = -1 // default value -1
|
||||
} else {
|
||||
createReq.MaxNumPublicIP = quota.ExtIps.ValueInt64()
|
||||
}
|
||||
} else {
|
||||
createReq.MaxCPUCapacity = -1
|
||||
createReq.MaxVDiskCapacity = -1
|
||||
createReq.MaxMemoryCapacity = -1
|
||||
createReq.MaxNetworkPeerTransfer = -1
|
||||
createReq.MaxNumPublicIP = -1
|
||||
}
|
||||
|
||||
// set up defNet, owner, ipcidr, description, reason, extNetId, extIp, registerComputes optional parameters
|
||||
if plan.DefNetType.IsNull() {
|
||||
createReq.DefNet = "PRIVATE" // default value
|
||||
} else {
|
||||
createReq.DefNet = plan.DefNetType.ValueString()
|
||||
}
|
||||
if !plan.Owner.IsNull() {
|
||||
createReq.Owner = plan.Owner.ValueString()
|
||||
}
|
||||
if !plan.IPCIDR.IsNull() {
|
||||
createReq.IPCIDR = plan.IPCIDR.ValueString()
|
||||
}
|
||||
if !plan.Description.IsNull() {
|
||||
createReq.Description = plan.Description.ValueString()
|
||||
}
|
||||
if !plan.Reason.IsNull() {
|
||||
createReq.Reason = plan.Reason.ValueString()
|
||||
}
|
||||
if plan.ExtNetID.IsNull() {
|
||||
createReq.ExtNetID = 0 // default value 0
|
||||
} else {
|
||||
createReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
|
||||
}
|
||||
if !plan.ExtIP.IsNull() {
|
||||
createReq.ExtIP = plan.ExtIP.ValueString()
|
||||
}
|
||||
if plan.RegisterComputes.IsNull() {
|
||||
createReq.RegisterComputes = false // default value
|
||||
} else {
|
||||
createReq.RegisterComputes = plan.RegisterComputes.ValueBool()
|
||||
}
|
||||
|
||||
return createReq, nil
|
||||
}
|
||||
|
||||
func RGCheckPresence(ctx context.Context, rgId uint64, c *decort.DecortClient) (*rg.RecordResourceGroup, error) {
|
||||
tflog.Info(ctx, fmt.Sprintf("Get info about resource group with ID - %v", rgId))
|
||||
|
||||
recordRG, err := c.CloudAPI().RG().Get(ctx, rg.GetRequest{RGID: rgId})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot get info about resource group with error: %w", err)
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "UtilityRGCheckPresence resourceRG: response from CloudAPI().RG().Get", map[string]any{"rg_id": rgId, "response": recordRG})
|
||||
|
||||
return recordRG, err
|
||||
}
|
||||
|
||||
// RestoreRG performs resource group Restore request. Returns error in case of failures.
|
||||
func RestoreRG(ctx context.Context, rgId uint64, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
restoreReq := rg.RestoreRequest{RGID: rgId}
|
||||
|
||||
tflog.Info(ctx, "utilityRestoreRG: before calling CloudAPI().RG().Restore", map[string]any{"rgId": rgId, "req": restoreReq})
|
||||
|
||||
res, err := c.CloudAPI().RG().Restore(ctx, restoreReq)
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"RestoreRG: cannot restore resource group",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
tflog.Info(ctx, "utilityRestoreRG: response from CloudAPI().RG().Restore", map[string]any{"rg_id": rgId, "response": res})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableRG performs resource group Enable request
|
||||
func EnableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error {
|
||||
enableReq := rg.EnableRequest{RGID: rgId}
|
||||
if !plan.Reason.IsNull() {
|
||||
enableReq.Reason = plan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "utilityEnableRG: before calling CloudAPI().RG().Enable", map[string]any{"rg_id": rgId, "req": enableReq})
|
||||
|
||||
res, err := c.CloudAPI().RG().Enable(ctx, enableReq)
|
||||
|
||||
tflog.Info(ctx, "utilityEnableRG: response from CloudAPI().RG().Enable", map[string]any{"rg_id": rgId, "response": res})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DisableRG performs resource group Disable request
|
||||
func DisableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error {
|
||||
disableReq := rg.DisableRequest{RGID: rgId}
|
||||
if !plan.Reason.IsNull() {
|
||||
disableReq.Reason = plan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "utilityDisableRG: before calling CloudAPI().RG().Disable", map[string]any{"rg_id": rgId, "req": disableReq})
|
||||
|
||||
res, err := c.CloudAPI().RG().Disable(ctx, disableReq)
|
||||
|
||||
tflog.Info(ctx, "utilityDisableRG: response from CloudAPI().RG().Disable", map[string]any{"rg_id": rgId, "response": res})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateRG compares plan and state for resource group fields name, description, quota, register_computes.
|
||||
// If any changes are detected, Update request is performed. If not, no update is performed.
|
||||
func UpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
var updateNeeded bool
|
||||
var diags diag.Diagnostics
|
||||
|
||||
updateReq := rg.UpdateRequest{
|
||||
RGID: rgId,
|
||||
}
|
||||
if !state.Reason.IsNull() {
|
||||
updateReq.Reason = state.Reason.ValueString()
|
||||
}
|
||||
|
||||
if !plan.Name.Equal(state.Name) {
|
||||
updateReq.Name = plan.Name.ValueString()
|
||||
tflog.Info(ctx, "utilityUpdateRG: new name specified", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"name_plan": plan.Name.ValueString(),
|
||||
"name_state": state.Name.ValueString()})
|
||||
updateNeeded = true
|
||||
}
|
||||
|
||||
if !plan.Description.Equal(state.Description) {
|
||||
updateReq.Description = plan.Description.ValueString()
|
||||
tflog.Info(ctx, "utilityUpdateRG: new description specified", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"description_plan": plan.Description.ValueString(),
|
||||
"description_state": state.Description.ValueString()})
|
||||
updateNeeded = true
|
||||
}
|
||||
|
||||
if !plan.RegisterComputes.Equal(state.RegisterComputes) {
|
||||
if plan.RegisterComputes.IsNull() {
|
||||
updateReq.RegisterComputes = false // default value
|
||||
} else {
|
||||
updateReq.RegisterComputes = plan.RegisterComputes.ValueBool()
|
||||
}
|
||||
tflog.Info(ctx, "utilityUpdateRG: new register_computes specified", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"register_computes_plan": plan.RegisterComputes.ValueBool(),
|
||||
"register_computes_state": state.RegisterComputes.ValueBool()})
|
||||
updateNeeded = true
|
||||
}
|
||||
|
||||
var updQuotaNeeded bool
|
||||
|
||||
var quotaPlan, quotaState models.QuotaModel
|
||||
if !plan.Quota.IsNull() {
|
||||
diags = plan.Quota.As(ctx, "aPlan, basetypes.ObjectAsOptions{})
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
}
|
||||
if !state.Quota.IsNull() {
|
||||
diags = state.Quota.As(ctx, "aState, basetypes.ObjectAsOptions{})
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
if !plan.Quota.IsNull() && !state.Quota.IsNull() {
|
||||
if !quotaPlan.CPU.Equal(quotaState.CPU) {
|
||||
updateReq.MaxCPUCapacity = quotaPlan.CPU.ValueInt64()
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
if !quotaPlan.Disk.Equal(quotaState.Disk) {
|
||||
updateReq.MaxVDiskCapacity = quotaPlan.Disk.ValueInt64()
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
if !quotaPlan.Ram.Equal(quotaState.Ram) {
|
||||
updateReq.MaxMemoryCapacity = quotaPlan.Ram.ValueInt64()
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
if !quotaPlan.ExtTraffic.Equal(quotaState.ExtTraffic) {
|
||||
updateReq.MaxNetworkPeerTransfer = quotaPlan.ExtTraffic.ValueInt64()
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
if !quotaPlan.ExtIps.Equal(quotaState.ExtIps) {
|
||||
updateReq.MaxNumPublicIP = quotaPlan.ExtIps.ValueInt64()
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
}
|
||||
|
||||
if state.Quota.IsNull() {
|
||||
if !quotaPlan.CPU.IsNull() {
|
||||
updateReq.MaxCPUCapacity = quotaPlan.CPU.ValueInt64()
|
||||
}
|
||||
if !quotaPlan.Disk.IsNull() {
|
||||
updateReq.MaxVDiskCapacity = quotaPlan.Disk.ValueInt64()
|
||||
}
|
||||
if !quotaPlan.Ram.IsNull() {
|
||||
updateReq.MaxMemoryCapacity = quotaPlan.Ram.ValueInt64()
|
||||
}
|
||||
if !quotaPlan.ExtTraffic.IsNull() {
|
||||
updateReq.MaxNetworkPeerTransfer = quotaPlan.ExtTraffic.ValueInt64()
|
||||
}
|
||||
if !quotaPlan.ExtIps.IsNull() {
|
||||
updateReq.MaxNumPublicIP = quotaPlan.ExtIps.ValueInt64()
|
||||
}
|
||||
updQuotaNeeded = true
|
||||
}
|
||||
// plan.Quota.IsNull() == true: we do not change quota in case it used to be set and then deleted
|
||||
|
||||
if updQuotaNeeded {
|
||||
tflog.Info(ctx, "utilityUpdateRG: new quota specified", map[string]any{
|
||||
"rg_id": plan.Id.ValueString()})
|
||||
updateNeeded = true
|
||||
}
|
||||
|
||||
if updateNeeded {
|
||||
tflog.Info(ctx, "utilityUpdateRG: before calling CloudAPI().RG().Update", map[string]any{"rg_id": plan.Id.ValueString(), "req": updateReq})
|
||||
res, err := c.CloudAPI().RG().Update(ctx, updateReq)
|
||||
tflog.Info(ctx, "utilityUpdateRG: response from CloudAPI().RG().Update", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
|
||||
if err != nil {
|
||||
diags.AddError("can not update RG", err.Error())
|
||||
return diags
|
||||
}
|
||||
}
|
||||
if !updateNeeded {
|
||||
tflog.Info(ctx, "utilityUpdateRG: call for CloudAPI().RG().Update was not needed", map[string]any{"rg_id": plan.Id.ValueString()})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableDisableCreateRG performs Enable request is enable is true, and Disable request otherwise.
|
||||
// In case of failure returns warnings.
|
||||
func EnableDisableCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
var enable bool
|
||||
if plan.Enable.IsNull() {
|
||||
enable = true // default value
|
||||
} else {
|
||||
enable = plan.Enable.ValueBool()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "EnableDisableCreateRG: resource group to be enabled/disabled", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"enable": enable})
|
||||
|
||||
if enable {
|
||||
err := EnableRG(ctx, rgId, plan, c)
|
||||
if err != nil {
|
||||
diags.AddWarning(
|
||||
"EnableDisableCreateRG: cannot enable rg",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
if !enable {
|
||||
err := DisableRG(ctx, rgId, plan, c)
|
||||
if err != nil {
|
||||
diags.AddWarning(
|
||||
"EnableDisableCreateRG: cannot disable rg",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "EnableDisableCreateRG: resource group is successfully enabled/disabled", map[string]any{"rg_id": rgId, "enable": enable})
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableDisableUpdateRG performs Enable request is enable is true, and Disable request otherwise.
|
||||
// In case of failure returns errors.
|
||||
func EnableDisableUpdateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
var enable bool
|
||||
if plan.Enable.IsNull() {
|
||||
enable = true // default value
|
||||
} else {
|
||||
enable = plan.Enable.ValueBool()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "EnableDisableUpdateRG: resource group to be enabled/disabled", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"enable": enable})
|
||||
|
||||
if enable {
|
||||
err := EnableRG(ctx, rgId, plan, c)
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"EnableDisableUpdateRG: cannot enable rg",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
if !enable {
|
||||
err := DisableRG(ctx, rgId, plan, c)
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"EnableDisableUpdateRG: cannot disable rg",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "EnableDisableUpdateRG: resource group is successfully enabled/disabled", map[string]any{"rg_id": rgId, "enable": enable})
|
||||
return nil
|
||||
}
|
||||
|
||||
// AccessUpdateRG compares plan and state for resource group field access.
|
||||
// If changes are detected, AccessRevoke request is performed for each deleted access user and AccessGrant request is
|
||||
// performed for each added access user. If no changes are detected, no requests performed.
|
||||
// Returns errors in case of failures.
|
||||
func AccessUpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
itemsAccessPlan := make([]models.AccessModel, 0, len(plan.Access.Elements()))
|
||||
diags = plan.Access.ElementsAs(ctx, &itemsAccessPlan, false)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "AccessUpdateRG: cannot populate itemsAccess with plan.Access List elements")
|
||||
return diags
|
||||
}
|
||||
|
||||
itemsAccessState := make([]models.AccessModel, 0, len(state.Access.Elements()))
|
||||
diags = state.Access.ElementsAs(ctx, &itemsAccessState, false)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "AccessUpdateRG: cannot populate itemsAccess with state.Access List elements")
|
||||
return diags
|
||||
}
|
||||
|
||||
// define accesses to be revoked and revoke them
|
||||
var deletedAccess []models.AccessModel
|
||||
for _, accessStateElem := range itemsAccessState {
|
||||
if !accessStateElem.Contains(itemsAccessPlan) {
|
||||
deletedAccess = append(deletedAccess, accessStateElem)
|
||||
}
|
||||
}
|
||||
|
||||
if len(deletedAccess) == 0 {
|
||||
tflog.Info(ctx, "AccessUpdateRG: no access needs to be revoked", map[string]any{
|
||||
"rg_id": plan.Id.ValueString()})
|
||||
}
|
||||
if len(deletedAccess) > 0 {
|
||||
tflog.Info(ctx, "AccessUpdateRG: access needs to be revoked", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"deleted_access": deletedAccess})
|
||||
|
||||
for _, deletedAccessItem := range deletedAccess {
|
||||
revokeReq := rg.AccessRevokeRequest{
|
||||
RGID: rgId,
|
||||
User: deletedAccessItem.User.ValueString(),
|
||||
}
|
||||
if !deletedAccessItem.Reason.IsNull() {
|
||||
revokeReq.Reason = deletedAccessItem.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessRevoke", map[string]any{"rg_id": plan.Id.ValueString(), "req": revokeReq})
|
||||
res, err := c.CloudAPI().RG().AccessRevoke(ctx, revokeReq)
|
||||
tflog.Info(ctx, "AccessUpdateRG: response from CloudAPI().RG().AccessRevoke", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"AccessUpdateRG: cannot revoke access for rg",
|
||||
err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// define accesses to be granted and grant them
|
||||
var addedAccess []models.AccessModel
|
||||
for _, accessPlanElem := range itemsAccessPlan {
|
||||
if !accessPlanElem.Contains(itemsAccessState) {
|
||||
addedAccess = append(addedAccess, accessPlanElem)
|
||||
}
|
||||
}
|
||||
|
||||
if len(addedAccess) == 0 {
|
||||
tflog.Info(ctx, "AccessUpdateRG: no access need to be granted", map[string]any{
|
||||
"rg_id": plan.Id.ValueString()})
|
||||
}
|
||||
if len(addedAccess) > 0 {
|
||||
tflog.Info(ctx, "AccessUpdateRG: access needs to be granted", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"added_access": addedAccess})
|
||||
|
||||
for _, addedAccessItem := range addedAccess {
|
||||
grantReq := rg.AccessGrantRequest{
|
||||
RGID: rgId,
|
||||
User: addedAccessItem.User.ValueString(),
|
||||
Right: addedAccessItem.Right.ValueString(),
|
||||
}
|
||||
if !addedAccessItem.Reason.IsNull() {
|
||||
grantReq.Reason = addedAccessItem.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{"rg_id": plan.Id.ValueString(), "req": grantReq})
|
||||
res, err := c.CloudAPI().RG().AccessGrant(ctx, grantReq)
|
||||
tflog.Info(ctx, "AccessUpdateRG: response from CloudAPI().RG().AccessGrant", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"AccessUpdateRG: cannot grant access for rg",
|
||||
err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// AccessCreateRG grants access to users specified in access field for created resource.
|
||||
// In case of failure returns warnings.
|
||||
func AccessCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
if len(plan.Access.Elements()) != 0 {
|
||||
tflog.Info(ctx, "AccessCreateRG: access needs to be granted", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"access_plan": plan.Access.Elements()})
|
||||
|
||||
itemsAccessPlan := make([]models.AccessModel, 0, len(plan.Access.Elements()))
|
||||
diagsItem := plan.Access.ElementsAs(ctx, &itemsAccessPlan, false)
|
||||
if diagsItem.HasError() {
|
||||
tflog.Warn(ctx, "cannot populate itemsAccess with plan.Access List elements")
|
||||
diags.AddWarning(fmt.Sprintf("AccessCreateRG: Unable to get access info for RG %d", rgId),
|
||||
"cannot populate itemsAccess with plan.Access List elements",
|
||||
)
|
||||
return diags
|
||||
}
|
||||
|
||||
for _, addedAccessItem := range itemsAccessPlan {
|
||||
grantReq := rg.AccessGrantRequest{
|
||||
RGID: rgId,
|
||||
User: addedAccessItem.User.ValueString(),
|
||||
Right: addedAccessItem.Right.ValueString(),
|
||||
}
|
||||
if !addedAccessItem.Reason.IsNull() {
|
||||
grantReq.Reason = addedAccessItem.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "AccessCreateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"req": grantReq})
|
||||
res, err := c.CloudAPI().RG().AccessGrant(ctx, grantReq)
|
||||
if err != nil {
|
||||
diags.AddWarning("AccessCreateRG: Unable to grant access for RG",
|
||||
err.Error())
|
||||
}
|
||||
tflog.Info(ctx, "AccessCreateRG: response from CloudAPI().RG().AccessGrant", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"response": res})
|
||||
}
|
||||
}
|
||||
|
||||
if len(plan.Access.Elements()) == 0 {
|
||||
tflog.Info(ctx, "AccessCreateRG: no access need to be granted", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"access_plan": plan.Access.Elements()})
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// SetDefNetUpdateRG compares plan and state for resource group update field def_net.
|
||||
// If any changes are detected, SetDefNet request is performed. If not, no SetDefNet is performed.
|
||||
// Returns error in case of failures.
|
||||
func SetDefNetUpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
var setDefNetNeeded bool
|
||||
|
||||
setDefNetReq := rg.SetDefNetRequest{
|
||||
RGID: rgId,
|
||||
}
|
||||
|
||||
var itemDefNetPlan, itemDefNetState models.DefNetModel
|
||||
if !plan.DefNet.IsNull() {
|
||||
diags.Append(plan.DefNet.As(ctx, &itemDefNetPlan, basetypes.ObjectAsOptions{})...)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "SetDefNetUpdateRG: cannot populate defNet with plan.DefNet object element")
|
||||
return diags
|
||||
}
|
||||
}
|
||||
if !state.DefNet.IsNull() {
|
||||
diags.Append(state.DefNet.As(ctx, &itemDefNetState, basetypes.ObjectAsOptions{})...)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "SetDefNetUpdateRG: cannot populate defNet with state.DefNet object element")
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
if !plan.DefNet.IsNull() && !state.DefNet.IsNull() {
|
||||
if !itemDefNetPlan.NetId.Equal(itemDefNetState.NetId) {
|
||||
setDefNetNeeded = true
|
||||
}
|
||||
if !itemDefNetPlan.NetType.Equal(itemDefNetState.NetType) {
|
||||
setDefNetNeeded = true
|
||||
}
|
||||
|
||||
} else if !plan.DefNet.IsNull() {
|
||||
setDefNetNeeded = true
|
||||
}
|
||||
|
||||
if setDefNetNeeded {
|
||||
tflog.Info(ctx, "utilitySetDefNetUpdateRG: new def_net specified", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"def_net_plan": plan.DefNet,
|
||||
"def_net_state": state.DefNet})
|
||||
setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString()
|
||||
if itemDefNetPlan.NetId.IsNull() {
|
||||
setDefNetReq.NetID = 0 // default value
|
||||
} else {
|
||||
setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64())
|
||||
}
|
||||
if !itemDefNetPlan.Reason.IsNull() {
|
||||
setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "utilitySetDefNetUpdateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "req": setDefNetReq})
|
||||
res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq)
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"SetDefNetUpdateRG: can not set defNet for rg",
|
||||
err.Error())
|
||||
return diags
|
||||
}
|
||||
tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
|
||||
}
|
||||
|
||||
if !setDefNetNeeded {
|
||||
tflog.Info(ctx, "utilitySetDefNetUpdateRG: call for CloudAPI().RG().SetDefNet was not needed", map[string]any{
|
||||
"rg_id": plan.Id.ValueString(),
|
||||
"def_net_plan": plan.DefNet,
|
||||
"def_net_state": state.DefNet})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDefNetCreateRG performs SetDefNet request if def_net field is not empty. Otherwise, no SetDefNet request is performed.
|
||||
// In case of failure returns warnings.
|
||||
func SetDefNetCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
setDefNetReq := rg.SetDefNetRequest{RGID: rgId}
|
||||
|
||||
var itemDefNetPlan models.DefNetModel
|
||||
|
||||
if !plan.DefNet.IsNull() {
|
||||
tflog.Info(ctx, "SetDefNetCreateRG: new def_net specified", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"def_net_plan": plan.DefNet})
|
||||
|
||||
diagItem := plan.DefNet.As(ctx, &itemDefNetPlan, basetypes.ObjectAsOptions{})
|
||||
if diagItem.HasError() {
|
||||
diags.AddWarning(
|
||||
fmt.Sprintf("SetDefNetCreateRG: Unable to setDefNet for RG %d", rgId),
|
||||
"cannot populate defNet with plan.DefNet object element for rg",
|
||||
)
|
||||
return diags
|
||||
}
|
||||
|
||||
setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString()
|
||||
if itemDefNetPlan.NetId.IsNull() {
|
||||
setDefNetReq.NetID = 0 // default value
|
||||
} else {
|
||||
setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64())
|
||||
}
|
||||
if !itemDefNetPlan.Reason.IsNull() {
|
||||
setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString()
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "SetDefNetCreateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": rgId, "req": setDefNetReq})
|
||||
res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq)
|
||||
tflog.Info(ctx, "SetDefNetCreateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": rgId, "response": res})
|
||||
if err != nil {
|
||||
diags.AddWarning(
|
||||
"SetDefNetCreateRG: Unable to setDefNet for RG",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
}
|
||||
|
||||
if plan.DefNet.IsNull() {
|
||||
tflog.Info(ctx, "SetDefNetCreateRG: call for CloudAPI().RG().SetDefNet was not needed", map[string]any{
|
||||
"rg_id": rgId,
|
||||
"def_net_plan": plan.DefNet})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RGReadStatus loads rg resource by ids id, gets it current status. Performs restore and enable if needed for
|
||||
// Deleted status.
|
||||
// In case of failure returns errors.
|
||||
func RGReadStatus(ctx context.Context, state *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "RGReadStatus: Read status rg with ID", map[string]any{"rg_id": state.Id.ValueString()})
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
rgId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
|
||||
if err != nil {
|
||||
diags.AddError("RGReadStatus: Cannot parse resource group ID from state", err.Error())
|
||||
return diags
|
||||
}
|
||||
|
||||
recordRG, err := RGCheckPresence(ctx, rgId, c)
|
||||
if err != nil {
|
||||
diags.AddError("RGReadStatus: Unable to Read RG before status check", err.Error())
|
||||
return diags
|
||||
}
|
||||
tflog.Info(ctx, "RGReadStatus: resource group values before status check", map[string]any{
|
||||
"rg_id": recordRG.ID,
|
||||
"updated_recordRG": recordRG})
|
||||
|
||||
// check resource status
|
||||
switch recordRG.Status {
|
||||
case status.Modeled:
|
||||
diags.AddError(
|
||||
"RG is in status Modeled",
|
||||
"please, contact support for more information",
|
||||
)
|
||||
return diags
|
||||
case status.Deleted:
|
||||
tflog.Info(ctx, "RGReadStatus: resource group with status.Deleted is being checked", map[string]any{
|
||||
"rg_id": recordRG.ID,
|
||||
"status": recordRG.Status})
|
||||
// restore and enable resource group in case it is required
|
||||
if state.Restore.IsNull() || state.Restore.ValueBool() { // default true or user set-up true
|
||||
diags.Append(RestoreRG(ctx, rgId, c)...)
|
||||
if diags.HasError() {
|
||||
tflog.Error(ctx, "RGReadStatus: cannot restore rg")
|
||||
return diags
|
||||
}
|
||||
tflog.Info(ctx, "RGReadStatus: resource group restored successfully", map[string]any{"rg_id": recordRG.ID})
|
||||
state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
|
||||
|
||||
if state.Enable.IsNull() || state.Enable.ValueBool() { // default true or user set-up true
|
||||
err := EnableRG(ctx, rgId, state, c)
|
||||
if err != nil {
|
||||
diags.AddError(
|
||||
"RGReadStatus: Unable to Enable RG",
|
||||
err.Error(),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
tflog.Info(ctx, "RGReadStatus: resource group enabled successfully", map[string]any{"rg_id": recordRG.ID})
|
||||
}
|
||||
}
|
||||
case status.Destroyed:
|
||||
diags.AddError(
|
||||
"RGReadStatus: RG is in status Destroyed",
|
||||
fmt.Sprintf("the resource with rg_id %d cannot be read because it has been destroyed", recordRG.ID),
|
||||
)
|
||||
return diags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Reference in New Issue
Block a user