This commit is contained in:
asteam
2024-07-25 14:33:38 +03:00
commit 6f40af6a5f
946 changed files with 98335 additions and 0 deletions

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceCompute{}
)

// NewDataSourceCompute returns a new kvmvm compute data source.
func NewDataSourceCompute() datasource.DataSource {
	return &dataSourceCompute{}
}

// dataSourceCompute is the data source implementation.
type dataSourceCompute struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute record and refreshes the Terraform state.
func (d *dataSourceCompute) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute")
	// Read Terraform configuration data into the model.
	var state models.RecordComputeModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceCompute(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute")
		return
	}
	tflog.Info(ctx, "End read data source compute")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceCompute) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceCompute(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceCompute) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceCompute) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeAudits{}
)

// NewDataSourceComputeAudits returns a new kvmvm compute audits data source.
func NewDataSourceComputeAudits() datasource.DataSource {
	return &dataSourceComputeAudits{}
}

// dataSourceComputeAudits is the data source implementation.
type dataSourceComputeAudits struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute audit list and refreshes the Terraform state.
func (d *dataSourceComputeAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute audits")
	// Read Terraform configuration data into the model.
	var state models.ListAuditsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute audits state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeAudits(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute audits")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute audits")
		return
	}
	tflog.Info(ctx, "End read data source compute audits")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeAudits(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_audits"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeGetAudits{}
)

// NewDataSourceComputeGetAudits returns a new kvmvm compute get-audits data source.
func NewDataSourceComputeGetAudits() datasource.DataSource {
	return &dataSourceComputeGetAudits{}
}

// dataSourceComputeGetAudits is the data source implementation.
type dataSourceComputeGetAudits struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute audits and refreshes the Terraform state.
func (d *dataSourceComputeGetAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute get audits")
	// Read Terraform configuration data into the model.
	var state models.GetAuditsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute get audits state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeGetAudits(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute get audits")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute get audits")
		return
	}
	tflog.Info(ctx, "End read data source compute get audits")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeGetAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeGetAudits(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeGetAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_get_audits"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeGetAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeGetConsoleUrl{}
)

// NewDataSourceComputeGetConsoleUrl returns a new kvmvm compute console-URL data source.
func NewDataSourceComputeGetConsoleUrl() datasource.DataSource {
	return &dataSourceComputeGetConsoleUrl{}
}

// dataSourceComputeGetConsoleUrl is the data source implementation.
type dataSourceComputeGetConsoleUrl struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute console URL and refreshes the Terraform state.
func (d *dataSourceComputeGetConsoleUrl) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute get console url")
	// Read Terraform configuration data into the model.
	var state models.GetConsoleUrlModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute get console url state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeGetConsoleUrl(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute get console url")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute get console url")
		return
	}
	tflog.Info(ctx, "End read data source compute get console url")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeGetConsoleUrl) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeGetConsoleUrl(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeGetConsoleUrl) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_get_console_url"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeGetConsoleUrl) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeGetLog{}
)

// NewDataSourceComputeGetLog returns a new kvmvm compute log data source.
func NewDataSourceComputeGetLog() datasource.DataSource {
	return &dataSourceComputeGetLog{}
}

// dataSourceComputeGetLog is the data source implementation.
type dataSourceComputeGetLog struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute log and refreshes the Terraform state.
func (d *dataSourceComputeGetLog) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute get log")
	// Read Terraform configuration data into the model.
	var state models.GetLogModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute get log state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeGetLog(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute get log")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute get log")
		return
	}
	tflog.Info(ctx, "End read data source compute get log")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeGetLog) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeGetLog(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeGetLog) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_get_log"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeGetLog) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeList{}
)

// NewDataSourceComputeList returns a new kvmvm compute list data source.
func NewDataSourceComputeList() datasource.DataSource {
	return &dataSourceComputeList{}
}

// dataSourceComputeList is the data source implementation.
type dataSourceComputeList struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute list and refreshes the Terraform state.
func (d *dataSourceComputeList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute list")
	// Read Terraform configuration data into the model.
	var state models.ListComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute list state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute list")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute list")
		return
	}
	tflog.Info(ctx, "End read data source compute list")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeListDeleted{}
)

// NewDataSourceComputeListDeleted returns a new kvmvm deleted-computes list data source.
func NewDataSourceComputeListDeleted() datasource.DataSource {
	return &dataSourceComputeListDeleted{}
}

// dataSourceComputeListDeleted is the data source implementation.
type dataSourceComputeListDeleted struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the deleted-computes list and refreshes the Terraform state.
func (d *dataSourceComputeListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute list deleted")
	// Read Terraform configuration data into the model.
	var state models.ListDeletedComputesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute list deleted state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeListDeleted(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute list deleted")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute list deleted")
		return
	}
	tflog.Info(ctx, "End read data source compute list deleted")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeListDeleted(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_list_deleted"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputePciDeviceList{}
)

// NewDataSourceComputePciDeviceList returns a new kvmvm PCI-device list data source.
func NewDataSourceComputePciDeviceList() datasource.DataSource {
	return &dataSourceComputePciDeviceList{}
}

// dataSourceComputePciDeviceList is the data source implementation.
type dataSourceComputePciDeviceList struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute PCI device list and refreshes the Terraform state.
func (d *dataSourceComputePciDeviceList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute pci device list")
	// Read Terraform configuration data into the model.
	var state models.ListPCIDevicesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute pci device list state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputePciDeviceList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute pci device list")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute pci device list")
		return
	}
	tflog.Info(ctx, "End read data source compute pci device list")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputePciDeviceList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputePciDeviceList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputePciDeviceList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_pci_device_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputePciDeviceList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputePFWList{}
)

// NewDataSourceComputePFWList returns a new kvmvm PFW (port-forwarding) list data source.
func NewDataSourceComputePFWList() datasource.DataSource {
	return &dataSourceComputePFWList{}
}

// dataSourceComputePFWList is the data source implementation.
type dataSourceComputePFWList struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute PFW list and refreshes the Terraform state.
func (d *dataSourceComputePFWList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute pfw list")
	// Read Terraform configuration data into the model.
	var state models.ListPFWsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute pfw list state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputePFWList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute pfw list")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute pfw list")
		return
	}
	tflog.Info(ctx, "End read data source compute pfw list")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputePFWList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputePFWList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputePFWList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_pfw_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputePFWList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeSnapshotUsage{}
)

// NewDataSourceComputeSnapshotUsage returns a new kvmvm snapshot-usage data source.
func NewDataSourceComputeSnapshotUsage() datasource.DataSource {
	return &dataSourceComputeSnapshotUsage{}
}

// dataSourceComputeSnapshotUsage is the data source implementation.
type dataSourceComputeSnapshotUsage struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}

// Read fetches the compute snapshot usage and refreshes the Terraform state.
func (d *dataSourceComputeSnapshotUsage) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute snapshot usage")
	// Read Terraform configuration data into the model.
	var state models.ListSnapShotsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute snapshot usage state")
		return
	}
	// Set timeouts (defaults to constants.Timeout30s unless overridden in config).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Map response body to schema.
	resp.Diagnostics.Append(flattens.DataSourceComputeSnapshotUsage(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute snapshot usage")
		return
	}
	// Set state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		// Log the failure, consistent with the other error paths above.
		tflog.Error(ctx, "Error set state data source compute snapshot usage")
		return
	}
	tflog.Info(ctx, "End read data source compute snapshot usage")
}

// Schema defines the schema for the data source, including the timeouts block.
func (d *dataSourceComputeSnapshotUsage) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeSnapshotUsage(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the data source type name.
func (d *dataSourceComputeSnapshotUsage) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm_snapshot_usage"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeSnapshotUsage) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeUserList{}
)
// NewDataSourceComputeUserList creates a new compute user list data source.
func NewDataSourceComputeUserList() datasource.DataSource {
	return &dataSourceComputeUserList{}
}
// dataSourceComputeUserList is the data source implementation.
type dataSourceComputeUserList struct {
	// client is the DECORT API client supplied by Configure.
	client *decort.DecortClient
}
// Read loads the configuration, applies the configured timeout, fetches the
// compute user list and stores the flattened result into Terraform state.
func (d *dataSourceComputeUserList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute user list")

	// Load the configuration into the state model.
	var state models.ListUsersModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute user list state")
		return
	}

	// Resolve the read timeout (30s default) and bound the context with it.
	readTimeout, timeoutDiags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(timeoutDiags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Fetch the user list from the API and flatten it into the model.
	resp.Diagnostics.Append(flattens.DataSourceComputeUserList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute user list")
		return
	}

	// Persist the populated model into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "End read data source compute user list")
}
// Schema describes the attributes and blocks of the compute user list data source.
func (d *dataSourceComputeUserList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeUserList(),
		Blocks:     blocks,
	}
}

// Metadata reports the full data source type name.
func (d *dataSourceComputeUserList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const suffix = "_kvmvm_user_list"
	resp.TypeName = req.ProviderTypeName + suffix
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeUserList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,87 @@
package kvmvm
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceComputeVGPUList{}
)
// NewDataSourceComputeVGPUList creates a new compute vGPU list data source.
func NewDataSourceComputeVGPUList() datasource.DataSource {
	return &dataSourceComputeVGPUList{}
}
// dataSourceComputeVGPUList is the data source implementation.
type dataSourceComputeVGPUList struct {
	// client is the DECORT API client supplied by Configure.
	client *decort.DecortClient
}
// Read loads the configuration, applies the configured timeout, fetches the
// compute vGPU list and stores the flattened result into Terraform state.
func (d *dataSourceComputeVGPUList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	tflog.Info(ctx, "Start read data source compute vgpu list")

	// Load the configuration into the state model.
	var state models.ListVGPUsModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read data source compute vgpu list state")
		return
	}

	// Resolve the read timeout (30s default) and bound the context with it.
	readTimeout, timeoutDiags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(timeoutDiags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Fetch the vGPU list from the API and flatten it into the model.
	resp.Diagnostics.Append(flattens.DataSourceComputeVGPUList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten compute vgpu list")
		return
	}

	// Persist the populated model into Terraform state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "End read data source compute vgpu list")
}
// Schema describes the attributes and blocks of the compute vGPU list data source.
func (d *dataSourceComputeVGPUList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceComputeVGPUList(),
		Blocks:     blocks,
	}
}

// Metadata reports the full data source type name.
func (d *dataSourceComputeVGPUList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const suffix = "_kvmvm_vgpu_list"
	resp.TypeName = req.ProviderTypeName + suffix
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceComputeVGPUList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure data source")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure data source successfully")
}

View File

@@ -0,0 +1,367 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceCompute fetches a compute record by ID and flattens it into the
// data source state model.
//
// Fix: diagnostics produced by the list/map attribute conversions used to be
// logged and then discarded (the function always returned nil). They are now
// accumulated and returned so the caller's resp.Diagnostics surfaces them.
func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceCompute")
	diags := diag.Diagnostics{}
	computeRecord, err := utilities.ComputeCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute info", err.Error())
		return diags
	}
	id := uuid.New()
	// Opaque sub-documents are stored in the schema as raw JSON strings;
	// marshal errors are ignored because the values came from decoded JSON.
	customFields, _ := json.Marshal(computeRecord.CustomFields)
	devices, _ := json.Marshal(computeRecord.Devices)
	userdata, _ := json.Marshal(computeRecord.Userdata)
	// Rebuild the whole state model, keeping only the user-supplied inputs
	// (Timeouts, ComputeId) from the previous state.
	*state = models.RecordComputeModel{
		Timeouts:               state.Timeouts,
		Id:                     types.StringValue(id.String()),
		ComputeId:              state.ComputeId,
		ACL:                    flattenACL(ctx, &computeRecord.ACL),
		AccountID:              types.Int64Value(int64(computeRecord.AccountID)),
		AccountName:            types.StringValue(computeRecord.AccountName),
		AffinityLabel:          types.StringValue(computeRecord.AffinityLabel),
		AffinityRules:          flattenAffinityRule(ctx, &computeRecord.AffinityRules),
		AffinityWeight:         types.Int64Value(int64(computeRecord.AffinityWeight)),
		AntiAffinityRules:      flattenAffinityRule(ctx, &computeRecord.AntiAffinityRules),
		Architecture:           types.StringValue(computeRecord.Architecture),
		BootDiskSize:           types.Int64Value(int64(computeRecord.BootDiskSize)),
		CdImageId:              types.Int64Value(int64(computeRecord.CdImageId)),
		CloneReference:         types.Int64Value(int64(computeRecord.CloneReference)),
		ComputeCIID:            types.Int64Value(int64(computeRecord.ComputeCIID)),
		CPU:                    types.Int64Value(int64(computeRecord.CPU)),
		CPUPin:                 types.BoolValue(computeRecord.CPUPin),
		CreatedBy:              types.StringValue(computeRecord.CreatedBy),
		CreatedTime:            types.Int64Value(int64(computeRecord.CreatedTime)),
		CustomFields:           types.StringValue(string(customFields)),
		DeletedBy:              types.StringValue(computeRecord.DeletedBy),
		DeletedTime:            types.Int64Value(int64(computeRecord.DeletedTime)),
		Description:            types.StringValue(computeRecord.Description),
		Devices:                types.StringValue(string(devices)),
		Disks:                  flattenDisks(ctx, &computeRecord.Disks),
		Driver:                 types.StringValue(computeRecord.Driver),
		GID:                    types.Int64Value(int64(computeRecord.GID)),
		GUID:                   types.Int64Value(int64(computeRecord.GUID)),
		HPBacked:               types.BoolValue(computeRecord.HPBacked),
		ImageID:                types.Int64Value(int64(computeRecord.ImageID)),
		ImageName:              types.StringValue(computeRecord.ImageName),
		Interfaces:             flattenInterfaces(ctx, &computeRecord.Interfaces),
		LockStatus:             types.StringValue(computeRecord.LockStatus),
		ManagerID:              types.Int64Value(int64(computeRecord.ManagerID)),
		ManagerType:            types.StringValue(computeRecord.ManagerType),
		MigrationJob:           types.Int64Value(int64(computeRecord.MigrationJob)),
		Milestones:             types.Int64Value(int64(computeRecord.Milestones)),
		Name:                   types.StringValue(computeRecord.Name),
		NatableVINSID:          types.Int64Value(int64(computeRecord.NatableVINSID)),
		NatableVINSIP:          types.StringValue(computeRecord.NatableVINSIP),
		NatableVINSName:        types.StringValue(computeRecord.NatableVINSName),
		NatableVINSNetwork:     types.StringValue(computeRecord.NatableVINSNetwork),
		NatableVINSNetworkName: types.StringValue(computeRecord.NatableVINSNetworkName),
		NeedReboot:             types.BoolValue(computeRecord.NeedReboot),
		NumaAffinity:           types.StringValue(computeRecord.NumaAffinity),
		NumaNodeId:             types.Int64Value(int64(computeRecord.NumaNodeId)),
		OSUsers:                flattenOSUsers(ctx, &computeRecord.OSUsers),
		Pinned:                 types.BoolValue(computeRecord.Pinned),
		RAM:                    types.Int64Value(int64(computeRecord.RAM)),
		ReferenceID:            types.StringValue(computeRecord.ReferenceID),
		Registered:             types.BoolValue(computeRecord.Registered),
		ResName:                types.StringValue(computeRecord.ResName),
		RGID:                   types.Int64Value(int64(computeRecord.RGID)),
		RGName:                 types.StringValue(computeRecord.RGName),
		SnapSets:               flattenSnapSet(ctx, &computeRecord.SnapSets),
		StatelessSepID:         types.Int64Value(int64(computeRecord.StatelessSepID)),
		StatelessSepType:       types.StringValue(computeRecord.StatelessSepType),
		Status:                 types.StringValue(computeRecord.Status),
		TechStatus:             types.StringValue(computeRecord.TechStatus),
		UpdatedBy:              types.StringValue(computeRecord.UpdatedBy),
		UpdatedTime:            types.Int64Value(int64(computeRecord.UpdatedTime)),
		UserManaged:            types.BoolValue(computeRecord.UserManaged),
		Userdata:               types.StringValue(string(userdata)),
		VirtualImageID:         types.Int64Value(int64(computeRecord.VirtualImageID)),
		VirtualImageName:       types.StringValue(computeRecord.VirtualImageName),
	}
	// Convert list/map attributes, accumulating conversion diagnostics instead
	// of overwriting them on each call.
	var d diag.Diagnostics
	state.BootOrder, d = types.ListValueFrom(ctx, types.StringType, computeRecord.BootOrder)
	diags.Append(d...)
	state.Clones, d = types.ListValueFrom(ctx, types.Int64Type, computeRecord.Clones)
	diags.Append(d...)
	state.Tags, d = types.MapValueFrom(ctx, types.StringType, computeRecord.Tags)
	diags.Append(d...)
	state.ReservedNodeCpus, d = types.ListValueFrom(ctx, types.Int64Type, computeRecord.ReservedNodeCpus)
	diags.Append(d...)
	state.VGPUs, d = types.ListValueFrom(ctx, types.Int64Type, computeRecord.VGPUs)
	diags.Append(d...)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
	}
	tflog.Info(ctx, "End FlattenDataSourceCompute")
	return diags
}
// flattenACL converts the SDK ACL record into its schema representation,
// flattening each of the three ACL scopes with the shared list helper.
func flattenACL(ctx context.Context, acl *compute.RecordACL) *models.RecordACLModel {
	tflog.Info(ctx, "Start flattenACL")
	result := &models.RecordACLModel{
		AccountACL: flattenACLList(ctx, &acl.AccountACL),
		ComputeACL: flattenACLList(ctx, &acl.ComputeACL),
		RGACL:      flattenACLList(ctx, &acl.RGACL),
	}
	tflog.Info(ctx, "End flattenACL")
	return result
}
// flattenACLList converts an SDK ACL list into its schema item models.
func flattenACLList(ctx context.Context, acl *compute.ListACL) []models.ItemACLModel {
	tflog.Info(ctx, "Start flattenACLList")
	items := *acl
	res := make([]models.ItemACLModel, len(items))
	for i := range items {
		entry := &items[i]
		res[i] = models.ItemACLModel{
			Explicit:    types.BoolValue(bool(entry.Explicit)),
			GUID:        types.StringValue(entry.GUID),
			Right:       types.StringValue(entry.Right),
			Status:      types.StringValue(entry.Status),
			Type:        types.StringValue(entry.Type),
			UserGroupID: types.StringValue(entry.UserGroupID),
		}
	}
	tflog.Info(ctx, "End flattenACLList")
	return res
}
// flattenAffinityRule converts SDK (anti-)affinity rules into schema item models.
func flattenAffinityRule(ctx context.Context, rules *compute.ListRules) []models.ItemRuleModel {
	tflog.Info(ctx, "Start flattenAffinityRule")
	items := *rules
	res := make([]models.ItemRuleModel, len(items))
	for i := range items {
		rule := &items[i]
		res[i] = models.ItemRuleModel{
			GUID:     types.StringValue(rule.GUID),
			Key:      types.StringValue(rule.Key),
			Mode:     types.StringValue(rule.Mode),
			Policy:   types.StringValue(rule.Policy),
			Topology: types.StringValue(rule.Topology),
			Value:    types.StringValue(rule.Value),
		}
	}
	tflog.Info(ctx, "End flattenAffinityRule")
	return res
}
// flattenDisks converts SDK compute disks into schema item models.
//
// Fix: DeletedTime was populated from item.CreatedTime (copy-paste bug); it is
// now taken from item.DeletedTime. Conversion diagnostics for Images/PresentTo
// are logged, matching the behavior of the sibling flatten helpers.
func flattenDisks(ctx context.Context, disks *compute.ListComputeDisks) []models.ItemDiskModel {
	tflog.Info(ctx, "Start flattenDisks")
	var diags diag.Diagnostics
	res := make([]models.ItemDiskModel, 0, len(*disks))
	for _, item := range *disks {
		// The disk ACL is stored in the schema as a raw JSON string.
		acl, _ := json.Marshal(item.ACL)
		temp := models.ItemDiskModel{
			CKey:            types.StringValue(item.CKey),
			ACL:             types.StringValue(string(acl)),
			AccountID:       types.Int64Value(int64(item.AccountID)),
			BootPartition:   types.Int64Value(int64(item.BootPartition)),
			CreatedTime:     types.Int64Value(int64(item.CreatedTime)),
			DeletedTime:     types.Int64Value(int64(item.DeletedTime)),
			Description:     types.StringValue(item.Description),
			DestructionTime: types.Int64Value(int64(item.DestructionTime)),
			DiskPath:        types.StringValue(item.DiskPath),
			GID:             types.Int64Value(int64(item.GID)),
			GUID:            types.Int64Value(int64(item.GUID)),
			ID:              types.Int64Value(int64(item.ID)),
			ImageID:         types.Int64Value(int64(item.ImageID)),
			IOTune: &models.IOTuneModel{
				ReadBytesSec:     types.Int64Value(int64(item.IOTune.ReadBytesSec)),
				ReadBytesSecMax:  types.Int64Value(int64(item.IOTune.ReadBytesSecMax)),
				ReadIOPSSec:      types.Int64Value(int64(item.IOTune.ReadIOPSSec)),
				ReadIOPSSecMax:   types.Int64Value(int64(item.IOTune.ReadIOPSSecMax)),
				SizeIOPSSec:      types.Int64Value(int64(item.IOTune.SizeIOPSSec)),
				TotalBytesSec:    types.Int64Value(int64(item.IOTune.TotalBytesSec)),
				TotalBytesSecMax: types.Int64Value(int64(item.IOTune.TotalBytesSecMax)),
				TotalIOPSSec:     types.Int64Value(int64(item.IOTune.TotalIOPSSec)),
				TotalIOPSSecMax:  types.Int64Value(int64(item.IOTune.TotalIOPSSecMax)),
				WriteBytesSec:    types.Int64Value(int64(item.IOTune.WriteBytesSec)),
				WriteBytesSecMax: types.Int64Value(int64(item.IOTune.WriteBytesSecMax)),
				WriteIOPSSec:     types.Int64Value(int64(item.IOTune.WriteIOPSSec)),
				WriteIOPSSecMax:  types.Int64Value(int64(item.IOTune.WriteIOPSSecMax)),
			},
			IQN:                 types.StringValue(item.IQN),
			Login:               types.StringValue(item.Login),
			Milestones:          types.Int64Value(int64(item.Milestones)),
			Name:                types.StringValue(item.Name),
			Order:               types.Int64Value(int64(item.Order)),
			Params:              types.StringValue(item.Params),
			ParentID:            types.Int64Value(int64(item.ParentID)),
			Passwd:              types.StringValue(item.Passwd),
			Pool:                types.StringValue(item.Pool),
			PCISlot:             types.Int64Value(item.PCISlot),
			PurgeTime:           types.Int64Value(int64(item.PurgeTime)),
			RealityDeviceNumber: types.Int64Value(int64(item.RealityDeviceNumber)),
			Replication: &models.ReplicationModel{
				DiskID:       types.Int64Value(int64(item.Replication.DiskID)),
				PoolID:       types.StringValue(item.Replication.PoolID),
				Role:         types.StringValue(item.Replication.Role),
				SelfVolumeID: types.StringValue(item.Replication.SelfVolumeID),
				StorageID:    types.StringValue(item.Replication.StorageID),
				VolumeID:     types.StringValue(item.Replication.VolumeID),
			},
			ResID:      types.StringValue(item.ResID),
			Role:       types.StringValue(item.Role),
			SepID:      types.Int64Value(int64(item.SepID)),
			Shareable:  types.BoolValue(item.Shareable),
			SizeMax:    types.Int64Value(int64(item.SizeMax)),
			SizeUsed:   types.Float64Value(item.SizeUsed),
			Snapshots:  flattenSnapshotExtend(ctx, &item.Snapshots),
			Status:     types.StringValue(item.Status),
			TechStatus: types.StringValue(item.TechStatus),
			Type:       types.StringValue(item.Type),
			VMID:       types.Int64Value(int64(item.VMID)),
		}
		temp.Images, diags = types.ListValueFrom(ctx, types.Int64Type, item.Images)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenDisks", diags))
		}
		temp.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, item.PresentTo)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenDisks", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenDisks")
	return res
}
// flattenSnapshotExtend converts extended SDK snapshots into schema item models.
func flattenSnapshotExtend(ctx context.Context, snapshot *compute.SnapshotExtendList) []models.ItemSnapshotExtendModel {
	tflog.Info(ctx, "Start flattenSnapshotExtend")
	items := *snapshot
	res := make([]models.ItemSnapshotExtendModel, len(items))
	for i := range items {
		snap := &items[i]
		res[i] = models.ItemSnapshotExtendModel{
			GUID:        types.StringValue(snap.GUID),
			Label:       types.StringValue(snap.Label),
			ReferenceID: types.StringValue(snap.ReferenceID),
			ResID:       types.StringValue(snap.ResID),
			SnapSetGUID: types.StringValue(snap.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(snap.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(snap.TimeStamp)),
		}
	}
	tflog.Info(ctx, "End flattenSnapshotExtend")
	return res
}
// flattenInterfaces converts SDK network interfaces into schema item models.
// VNF conversion diagnostics are logged and not propagated (matching the
// other flatten helpers in this file).
func flattenInterfaces(ctx context.Context, interfaces *compute.ListInterfaces) []models.ItemInterfaceModel {
	tflog.Info(ctx, "Start flattenInterfaces")
	var diags diag.Diagnostics
	res := make([]models.ItemInterfaceModel, 0, len(*interfaces))
	for _, iface := range *interfaces {
		qos := &models.QOSModel{
			ERate:   types.Int64Value(int64(iface.QOS.ERate)),
			GUID:    types.StringValue(iface.QOS.GUID),
			InBurst: types.Int64Value(int64(iface.QOS.InBurst)),
			InRate:  types.Int64Value(int64(iface.QOS.InRate)),
		}
		entry := models.ItemInterfaceModel{
			ConnID:      types.Int64Value(int64(iface.ConnID)),
			ConnType:    types.StringValue(iface.ConnType),
			DefGW:       types.StringValue(iface.DefGW),
			Enabled:     types.BoolValue(iface.Enabled),
			FLIPGroupID: types.Int64Value(int64(iface.FLIPGroupID)),
			GUID:        types.StringValue(iface.GUID),
			IPAddress:   types.StringValue(iface.IPAddress),
			ListenSSH:   types.BoolValue(iface.ListenSSH),
			MAC:         types.StringValue(iface.MAC),
			Name:        types.StringValue(iface.Name),
			NetID:       types.Int64Value(int64(iface.NetID)),
			NetMask:     types.Int64Value(int64(iface.NetMask)),
			NetType:     types.StringValue(iface.NetType),
			NodeID:      types.Int64Value(int64(iface.NodeID)),
			PCISlot:     types.Int64Value(iface.PCISlot),
			QOS:         qos,
			Target:      types.StringValue(iface.Target),
			Type:        types.StringValue(iface.Type),
		}
		entry.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, iface.VNFs)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenInterfaces", diags))
		}
		res = append(res, entry)
	}
	tflog.Info(ctx, "End flattenInterfaces")
	return res
}
// flattenSnapSet converts SDK snapshot sets into schema item models. Disk ID
// conversion diagnostics are logged and not propagated.
func flattenSnapSet(ctx context.Context, snapSet *compute.ListSnapSets) []models.ItemSnapSetModel {
	tflog.Info(ctx, "Start flattenSnapSet")
	var diags diag.Diagnostics
	res := make([]models.ItemSnapSetModel, 0, len(*snapSet))
	for _, set := range *snapSet {
		entry := models.ItemSnapSetModel{
			GUID:      types.StringValue(set.GUID),
			Label:     types.StringValue(set.Label),
			Timestamp: types.Int64Value(int64(set.Timestamp)),
		}
		entry.Disks, diags = types.ListValueFrom(ctx, types.Int64Type, set.Disks)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenSnapSet", diags))
		}
		res = append(res, entry)
	}
	tflog.Info(ctx, "End flattenSnapSet")
	return res
}
// flattenOSUsers converts SDK OS users into schema item models.
func flattenOSUsers(ctx context.Context, osUsers *compute.ListOSUser) []models.ItemOSUserModel {
	tflog.Info(ctx, "Start flattenOSUsers")
	users := *osUsers
	res := make([]models.ItemOSUserModel, len(users))
	for i := range users {
		user := &users[i]
		res[i] = models.ItemOSUserModel{
			GUID:     types.StringValue(user.GUID),
			Login:    types.StringValue(user.Login),
			Password: types.StringValue(user.Password),
			PubKey:   types.StringValue(user.PubKey),
		}
	}
	tflog.Info(ctx, "End flattenOSUsers")
	return res
}

View File

@@ -0,0 +1,57 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeAudits fetches the audit trail of a compute and flattens
// it into the data source state model.
func DataSourceComputeAudits(ctx context.Context, state *models.ListAuditsModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeAudits")
	var diags diag.Diagnostics
	auditsList, err := utilities.ComputeAuditsCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute audits info", err.Error())
		return diags
	}
	// Keep the user-supplied inputs, replace everything else.
	*state = models.ListAuditsModel{
		ComputeID: state.ComputeID,
		Timeouts:  state.Timeouts,
		Id:        types.StringValue(uuid.New().String()),
		Items:     flattenItemsAuditsList(ctx, auditsList),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeAudits")
	return nil
}
// flattenItemsAuditsList converts SDK audit entries into schema item models.
func flattenItemsAuditsList(ctx context.Context, audits *compute.ListAudits) []models.ItemAuditModel {
	tflog.Info(ctx, "Start flattenItemsAuditsList")
	items := *audits
	res := make([]models.ItemAuditModel, len(items))
	for i := range items {
		audit := &items[i]
		res[i] = models.ItemAuditModel{
			Call:         types.StringValue(audit.Call),
			ResponseTime: types.Float64Value(audit.ResponseTime),
			StatusCode:   types.Int64Value(int64(audit.StatusCode)),
			Timestamp:    types.Float64Value(audit.Timestamp),
			User:         types.StringValue(audit.User),
		}
	}
	tflog.Info(ctx, "End flattenItemsAuditsList")
	return res
}

View File

@@ -0,0 +1,54 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeGetAudits fetches the short audit list of a compute and
// flattens it into the data source state model.
func DataSourceComputeGetAudits(ctx context.Context, state *models.GetAuditsModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeGetAudits")
	var diags diag.Diagnostics
	auditsShortList, err := utilities.ComputeGetAuditsCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute get audits info", err.Error())
		return diags
	}
	// Keep the user-supplied inputs, replace everything else.
	*state = models.GetAuditsModel{
		ComputeID: state.ComputeID,
		Timeouts:  state.Timeouts,
		Id:        types.StringValue(uuid.New().String()),
		Items:     flattenItemsShort(ctx, auditsShortList),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeGetAudits")
	return nil
}
// flattenItemsShort converts SDK short audit entries into schema item models.
func flattenItemsShort(ctx context.Context, audits *compute.ListShortAudits) []models.ItemShortAuditModel {
	tflog.Info(ctx, "Start flattenItemsShort")
	items := *audits
	res := make([]models.ItemShortAuditModel, len(items))
	for i := range items {
		audit := &items[i]
		res[i] = models.ItemShortAuditModel{
			Epoch:   types.Float64Value(audit.Epoch),
			Message: types.StringValue(audit.Message),
		}
	}
	tflog.Info(ctx, "End flattenItemsShort")
	return res
}

View File

@@ -0,0 +1,37 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeGetConsoleUrl fetches the console URL of a compute and
// stores it in the data source state model.
func DataSourceComputeGetConsoleUrl(ctx context.Context, state *models.GetConsoleUrlModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeGetConsoleUrl")
	var diags diag.Diagnostics
	consoleUrl, err := utilities.ComputeGetConsoleUrlCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute console url", err.Error())
		return diags
	}
	// Keep the user-supplied inputs, replace everything else.
	*state = models.GetConsoleUrlModel{
		ComputeID:  state.ComputeID,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(uuid.New().String()),
		ConsoleURL: types.StringValue(consoleUrl),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeGetConsoleUrl")
	return nil
}

View File

@@ -0,0 +1,38 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeGetLog fetches the log file at state.Path for a compute
// and stores its content in the data source state model.
func DataSourceComputeGetLog(ctx context.Context, state *models.GetLogModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeGetLog")
	var diags diag.Diagnostics
	log, err := utilities.ComputeGetLogCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute log", err.Error())
		return diags
	}
	// Keep the user-supplied inputs, replace everything else.
	*state = models.GetLogModel{
		ComputeID: state.ComputeID,
		Path:      state.Path,
		Timeouts:  state.Timeouts,
		Id:        types.StringValue(uuid.New().String()),
		Log:       types.StringValue(log),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeGetLog")
	return nil
}

View File

@@ -0,0 +1,272 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeList requests the filtered compute list and flattens it
// into the data source state model.
func DataSourceComputeList(ctx context.Context, state *models.ListComputesModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeList")
	var diags diag.Diagnostics
	list, err := utilities.ComputeListCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute list info", err.Error())
		return diags
	}
	// Keep the user-supplied filter/pagination fields, replace the computed ones.
	*state = models.ListComputesModel{
		ByID:           state.ByID,
		Name:           state.Name,
		AccountID:      state.AccountID,
		RGName:         state.RGName,
		RGID:           state.RGID,
		TechStatus:     state.TechStatus,
		Status:         state.Status,
		IPAddress:      state.IPAddress,
		ExtNetName:     state.ExtNetName,
		ExtNetID:       state.ExtNetID,
		IncludeDeleted: state.IncludeDeleted,
		SortBy:         state.SortBy,
		Page:           state.Page,
		Size:           state.Size,
		IgnoreK8s:      state.IgnoreK8s,
		Timeouts:       state.Timeouts,
		Id:             types.StringValue(uuid.New().String()),
		Items:          flattenItemsList(ctx, list),
		EntryCount:     types.Int64Value(int64(list.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeList")
	return nil
}
// flattenItemsList converts each SDK compute item of the list response into
// its schema item model. Field mapping is one-to-one; list/map attributes are
// converted last, and any conversion diagnostics are logged only (not
// propagated — NOTE(review): errors here do not reach the caller).
func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []models.ItemComputeModel {
	tflog.Info(ctx, "Start flattenItemsList")
	var diags diag.Diagnostics
	res := make([]models.ItemComputeModel, 0, len(computes.Data))
	for _, item := range computes.Data {
		// Opaque sub-documents are stored as raw JSON strings; marshal errors
		// are ignored because the values came from decoded JSON.
		customFields, _ := json.Marshal(item.CustomFields)
		devices, _ := json.Marshal(item.Devices)
		temp := models.ItemComputeModel{
			ACL:               flattenACLInList(ctx, &item.ACL),
			AccountID:         types.Int64Value(int64(item.AccountID)),
			AccountName:       types.StringValue(item.AccountName),
			AffinityLabel:     types.StringValue(item.AffinityLabel),
			AffinityRules:     flattenAffinityRuleInList(ctx, &item.AffinityRules),
			AffinityWeight:    types.Int64Value(int64(item.AffinityWeight)),
			AntiAffinityRules: flattenAffinityRuleInList(ctx, &item.AntiAffinityRules),
			Architecture:      types.StringValue(item.Architecture),
			BootDiskSize:      types.Int64Value(int64(item.BootDiskSize)),
			CdImageId:         types.Int64Value(int64(item.CdImageId)),
			CloneReference:    types.Int64Value(int64(item.CloneReference)),
			ComputeCIID:       types.Int64Value(int64(item.ComputeCIID)),
			CPU:               types.Int64Value(int64(item.CPU)),
			CPUPin:            types.BoolValue(item.CPUPin),
			CreatedBy:         types.StringValue(item.CreatedBy),
			CreatedTime:       types.Int64Value(int64(item.CreatedTime)),
			CustomFields:      types.StringValue(string(customFields)),
			DeletedBy:         types.StringValue(item.DeletedBy),
			DeletedTime:       types.Int64Value(int64(item.DeletedTime)),
			Description:       types.StringValue(item.Description),
			Devices:           types.StringValue(string(devices)),
			Disks:             flattenDisksInList(ctx, &item.Disks),
			Driver:            types.StringValue(item.Driver),
			GID:               types.Int64Value(int64(item.GID)),
			GUID:              types.Int64Value(int64(item.GUID)),
			HPBacked:          types.BoolValue(item.HPBacked),
			ComputeId:         types.Int64Value(int64(item.ID)),
			ImageID:           types.Int64Value(int64(item.ImageID)),
			Interfaces:        flattenInterfaceInList(ctx, &item.Interfaces),
			LockStatus:        types.StringValue(item.LockStatus),
			ManagerID:         types.Int64Value(int64(item.ManagerID)),
			ManagerType:       types.StringValue(item.ManagerType),
			MigrationJob:      types.Int64Value(int64(item.MigrationJob)),
			Milestones:        types.Int64Value(int64(item.Milestones)),
			Name:              types.StringValue(item.Name),
			NeedReboot:        types.BoolValue(item.NeedReboot),
			NumaAffinity:      types.StringValue(item.NumaAffinity),
			NumaNodeId:        types.Int64Value(int64(item.NumaNodeId)),
			Pinned:            types.BoolValue(item.Pinned),
			RAM:               types.Int64Value(int64(item.RAM)),
			ReferenceID:       types.StringValue(item.ReferenceID),
			Registered:        types.BoolValue(item.Registered),
			ResName:           types.StringValue(item.ResName),
			RGID:              types.Int64Value(int64(item.RGID)),
			RGName:            types.StringValue(item.RGName),
			SnapSets:          flattenSnapSetInList(ctx, &item.SnapSets),
			StatelessSepID:    types.Int64Value(int64(item.StatelessSepID)),
			StatelessSepType:  types.StringValue(item.StatelessSepType),
			Status:            types.StringValue(item.Status),
			TechStatus:        types.StringValue(item.TechStatus),
			TotalDiskSize:     types.Int64Value(int64(item.TotalDiskSize)),
			UpdatedBy:         types.StringValue(item.UpdatedBy),
			UpdatedTime:       types.Int64Value(int64(item.UpdatedTime)),
			UserManaged:       types.BoolValue(item.UserManaged),
			VINSConnected:     types.Int64Value(int64(item.VINSConnected)),
			VirtualImageID:    types.Int64Value(int64(item.VirtualImageID)),
		}
		// Conversion of list/map attributes; diags is overwritten per call.
		temp.BootOrder, diags = types.ListValueFrom(ctx, types.StringType, item.BootOrder)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
		}
		temp.Clones, diags = types.ListValueFrom(ctx, types.Int64Type, item.Clones)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
		}
		temp.Tags, diags = types.MapValueFrom(ctx, types.StringType, item.Tags)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
		}
		temp.ReservedNodeCpus, diags = types.ListValueFrom(ctx, types.Int64Type, item.ReservedNodeCpus)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
		}
		temp.VGPUs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VGPUs)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenItemsList")
	return res
}
// flattenACLInList converts an SDK ACL list into the list-variant item models.
func flattenACLInList(ctx context.Context, acl *compute.ListACL) []models.ItemACLInListModel {
	tflog.Info(ctx, "Start flattenACLInList")
	items := *acl
	res := make([]models.ItemACLInListModel, len(items))
	for i := range items {
		entry := &items[i]
		res[i] = models.ItemACLInListModel{
			Explicit:    types.BoolValue(bool(entry.Explicit)),
			GUID:        types.StringValue(entry.GUID),
			Right:       types.StringValue(entry.Right),
			Status:      types.StringValue(entry.Status),
			Type:        types.StringValue(entry.Type),
			UserGroupID: types.StringValue(entry.UserGroupID),
		}
	}
	tflog.Info(ctx, "End flattenACLInList")
	return res
}
// flattenAffinityRuleInList converts SDK affinity/anti-affinity rules into the
// data-source model representation used by the compute list schema.
func flattenAffinityRuleInList(ctx context.Context, rules *compute.ListRules) []models.ItemRuleInListModel {
	tflog.Info(ctx, "Start flattenAffinityRuleInList")
	result := make([]models.ItemRuleInListModel, 0, len(*rules))
	for _, rule := range *rules {
		result = append(result, models.ItemRuleInListModel{
			GUID:     types.StringValue(rule.GUID),
			Key:      types.StringValue(rule.Key),
			Mode:     types.StringValue(rule.Mode),
			Policy:   types.StringValue(rule.Policy),
			Topology: types.StringValue(rule.Topology),
			Value:    types.StringValue(rule.Value),
		})
	}
	tflog.Info(ctx, "End flattenAffinityRuleInList")
	return result
}
// flattenDisksInList converts SDK disk info entries into the data-source
// model representation used by the compute list schema.
func flattenDisksInList(ctx context.Context, disks *compute.ListInfoDisks) []models.DiskInListModel {
	tflog.Info(ctx, "Start flattenDisksInList")
	result := make([]models.DiskInListModel, 0, len(*disks))
	for _, disk := range *disks {
		result = append(result, models.DiskInListModel{
			DiskId:  types.Int64Value(int64(disk.ID)),
			PCISlot: types.Int64Value(disk.PCISlot),
		})
	}
	tflog.Info(ctx, "End flattenDisksInList")
	return result
}
// flattenInterfaceInList converts SDK compute network interfaces into the
// data-source model representation used by the compute list schema.
// Conversion problems are logged, not propagated, matching the sibling
// flatten helpers in this file.
func flattenInterfaceInList(ctx context.Context, interfaces *compute.ListInterfaces) []models.ItemVNFInterfaceInListModel {
	tflog.Info(ctx, "Start flattenInterfaceInList")
	var diags diag.Diagnostics
	res := make([]models.ItemVNFInterfaceInListModel, 0, len(*interfaces))
	for _, item := range *interfaces {
		temp := models.ItemVNFInterfaceInListModel{
			ConnID:      types.Int64Value(int64(item.ConnID)),
			ConnType:    types.StringValue(item.ConnType),
			DefGW:       types.StringValue(item.DefGW),
			Enabled:     types.BoolValue(item.Enabled),
			FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
			GUID:        types.StringValue(item.GUID),
			IPAddress:   types.StringValue(item.IPAddress),
			ListenSSH:   types.BoolValue(item.ListenSSH),
			MAC:         types.StringValue(item.MAC),
			Name:        types.StringValue(item.Name),
			NetID:       types.Int64Value(int64(item.NetID)),
			NetMask:     types.Int64Value(int64(item.NetMask)),
			NetType:     types.StringValue(item.NetType),
			NodeID:      types.Int64Value(int64(item.NodeID)),
			PCISlot:     types.Int64Value(item.PCISlot),
			QOS: &models.QOSInListModel{
				ERate:   types.Int64Value(int64(item.QOS.ERate)),
				GUID:    types.StringValue(item.QOS.GUID),
				InBurst: types.Int64Value(int64(item.QOS.InBurst)),
				InRate:  types.Int64Value(int64(item.QOS.InRate)),
			},
			Target: types.StringValue(item.Target),
			Type:   types.StringValue(item.Type),
		}
		temp.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VNFs)
		// HasError is the canonical test for diag.Diagnostics: a non-nil but
		// error-free collection must not be reported as a failure.
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenInterfaceInList", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenInterfaceInList")
	return res
}
// flattenSnapSetInList converts SDK snapshot sets into the data-source model
// representation used by the compute list schema. Conversion problems are
// logged, not propagated, matching the sibling flatten helpers.
func flattenSnapSetInList(ctx context.Context, snapSet *compute.ListSnapSets) []models.ItemSnapSetInListModel {
	tflog.Info(ctx, "Start flattenSnapSetInList")
	var diags diag.Diagnostics
	res := make([]models.ItemSnapSetInListModel, 0, len(*snapSet))
	for _, item := range *snapSet {
		temp := models.ItemSnapSetInListModel{
			GUID:      types.StringValue(item.GUID),
			Label:     types.StringValue(item.Label),
			Timestamp: types.Int64Value(int64(item.Timestamp)),
		}
		temp.Disks, diags = types.ListValueFrom(ctx, types.Int64Type, item.Disks)
		// HasError is the canonical test for diag.Diagnostics: a non-nil but
		// error-free collection must not be reported as a failure.
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenSnapSetInList", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenSnapSetInList")
	return res
}

View File

@@ -0,0 +1,270 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeListDeleted refreshes the data-source state for the list of
// deleted computes: it fetches the list from the platform and rebuilds *state
// from the incoming request filters plus the flattened API response.
func DataSourceComputeListDeleted(ctx context.Context, state *models.ListDeletedComputesModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeListDeleted")
	diags := diag.Diagnostics{}
	// Fetch the deleted-computes list; an API error becomes a diagnostic.
	listDeleted, err := utilities.ComputeListDeletedCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute list deleted info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	// Rebuild the state wholesale: filter/request fields are carried over from
	// the incoming state, while Id, Items and EntryCount come from the response.
	*state = models.ListDeletedComputesModel{
		ByID:       state.ByID,
		Name:       state.Name,
		AccountID:  state.AccountID,
		RGName:     state.RGName,
		RGID:       state.RGID,
		TechStatus: state.TechStatus,
		IPAddress:  state.IPAddress,
		ExtNetName: state.ExtNetName,
		ExtNetID:   state.ExtNetID,
		SortBy:     state.SortBy,
		Page:       state.Page,
		Size:       state.Size,
		IgnoreK8s:  state.IgnoreK8s,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(id.String()),
		Items:      flattenItemsListDeleted(ctx, listDeleted),
		EntryCount: types.Int64Value(int64(listDeleted.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeListDeleted")
	return nil
}
// flattenItemsListDeleted converts the SDK deleted-computes list into the
// data-source item models. Nested collections are flattened via the
// *InListDeleted helpers; conversion problems are logged, not propagated.
func flattenItemsListDeleted(ctx context.Context, computes *compute.ListComputes) []models.ItemListDeletedComputeModel {
	tflog.Info(ctx, "Start flattenItemsListDeleted")
	var diags diag.Diagnostics
	res := make([]models.ItemListDeletedComputeModel, 0, len(computes.Data))
	for _, item := range computes.Data {
		// CustomFields and Devices are free-form structures; they are stored
		// in state as their JSON encoding. Marshal errors are ignored because
		// the inputs come from a successful JSON API response.
		customFields, _ := json.Marshal(item.CustomFields)
		devices, _ := json.Marshal(item.Devices)
		temp := models.ItemListDeletedComputeModel{
			ACL:                flattenACLInListDeleted(ctx, &item.ACL),
			AccountID:          types.Int64Value(int64(item.AccountID)),
			AccountName:        types.StringValue(item.AccountName),
			AffinityLabel:      types.StringValue(item.AffinityLabel),
			AffinityRules:      flattenAffinityRuleInListDeleted(ctx, &item.AffinityRules),
			AffinityWeight:     types.Int64Value(int64(item.AffinityWeight)),
			AntiAffinityRules:  flattenAffinityRuleInListDeleted(ctx, &item.AntiAffinityRules),
			Architecture:       types.StringValue(item.Architecture),
			BootDiskSize:       types.Int64Value(int64(item.BootDiskSize)),
			CdImageId:          types.Int64Value(int64(item.CdImageId)),
			CloneReference:     types.Int64Value(int64(item.CloneReference)),
			ComputeCIID:        types.Int64Value(int64(item.ComputeCIID)),
			CPU:                types.Int64Value(int64(item.CPU)),
			CPUPin:             types.BoolValue(item.CPUPin),
			CreatedBy:          types.StringValue(item.CreatedBy),
			CreatedTime:        types.Int64Value(int64(item.CreatedTime)),
			CustomFields:       types.StringValue(string(customFields)),
			DeletedBy:          types.StringValue(item.DeletedBy),
			DeletedTime:        types.Int64Value(int64(item.DeletedTime)),
			Description:        types.StringValue(item.Description),
			Devices:            types.StringValue(string(devices)),
			Disks:              flattenDisksInListDeleted(ctx, &item.Disks),
			Driver:             types.StringValue(item.Driver),
			GID:                types.Int64Value(int64(item.GID)),
			GUID:               types.Int64Value(int64(item.GUID)),
			HPBacked:           types.BoolValue(item.HPBacked),
			ComputeId:          types.Int64Value(int64(item.ID)),
			ImageID:            types.Int64Value(int64(item.ImageID)),
			Interfaces:         flattenInterfaceInListDeleted(ctx, &item.Interfaces),
			LockStatus:         types.StringValue(item.LockStatus),
			ManagerID:          types.Int64Value(int64(item.ManagerID)),
			ManagerType:        types.StringValue(item.ManagerType),
			MigrationJob:       types.Int64Value(int64(item.MigrationJob)),
			Milestones:         types.Int64Value(int64(item.Milestones)),
			Name:               types.StringValue(item.Name),
			NeedReboot:         types.BoolValue(item.NeedReboot),
			NumaAffinity:       types.StringValue(item.NumaAffinity),
			NumaNodeId:         types.Int64Value(int64(item.NumaNodeId)),
			Pinned:             types.BoolValue(item.Pinned),
			RAM:                types.Int64Value(int64(item.RAM)),
			ReferenceID:        types.StringValue(item.ReferenceID),
			Registered:         types.BoolValue(item.Registered),
			ResName:            types.StringValue(item.ResName),
			RGID:               types.Int64Value(int64(item.RGID)),
			RGName:             types.StringValue(item.RGName),
			SnapSets:           flattenSnapSetInListDeleted(ctx, &item.SnapSets),
			StatelessSepID:     types.Int64Value(int64(item.StatelessSepID)),
			StatelessSepType:   types.StringValue(item.StatelessSepType),
			Status:             types.StringValue(item.Status),
			TechStatus:         types.StringValue(item.TechStatus),
			TotalDiskSize:      types.Int64Value(int64(item.TotalDiskSize)),
			UpdatedBy:          types.StringValue(item.UpdatedBy),
			UpdatedTime:        types.Int64Value(int64(item.UpdatedTime)),
			UserManaged:        types.BoolValue(item.UserManaged),
			VINSConnected:      types.Int64Value(int64(item.VINSConnected)),
			VirtualImageID:     types.Int64Value(int64(item.VirtualImageID)),
		}
		// HasError is the canonical test for diag.Diagnostics: a non-nil but
		// error-free collection must not be reported as a failure.
		temp.BootOrder, diags = types.ListValueFrom(ctx, types.StringType, item.BootOrder)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
		}
		temp.Clones, diags = types.ListValueFrom(ctx, types.Int64Type, item.Clones)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
		}
		temp.Tags, diags = types.MapValueFrom(ctx, types.StringType, item.Tags)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
		}
		temp.ReservedNodeCpus, diags = types.ListValueFrom(ctx, types.Int64Type, item.ReservedNodeCpus)
		if diags.HasError() {
			// was "Error flattenItemsList" — copy-paste from the sibling file
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
		}
		temp.VGPUs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VGPUs)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenItemsListDeleted")
	return res
}
// flattenACLInListDeleted converts an SDK ACL list into the data-source model
// representation used by the deleted-computes list schema.
func flattenACLInListDeleted(ctx context.Context, acl *compute.ListACL) []models.ItemACLInListDeletedModel {
	tflog.Info(ctx, "Start flattenACLInListDeleted")
	result := make([]models.ItemACLInListDeletedModel, 0, len(*acl))
	for _, aclItem := range *acl {
		result = append(result, models.ItemACLInListDeletedModel{
			Explicit:    types.BoolValue(bool(aclItem.Explicit)),
			GUID:        types.StringValue(aclItem.GUID),
			Right:       types.StringValue(aclItem.Right),
			Status:      types.StringValue(aclItem.Status),
			Type:        types.StringValue(aclItem.Type),
			UserGroupID: types.StringValue(aclItem.UserGroupID),
		})
	}
	tflog.Info(ctx, "End flattenACLInListDeleted")
	return result
}
// flattenAffinityRuleInListDeleted converts SDK affinity/anti-affinity rules
// into the data-source model representation used by the deleted-computes list.
func flattenAffinityRuleInListDeleted(ctx context.Context, rules *compute.ListRules) []models.ItemRuleInListDeletedModel {
	tflog.Info(ctx, "Start flattenAffinityRuleInListDeleted")
	result := make([]models.ItemRuleInListDeletedModel, 0, len(*rules))
	for _, rule := range *rules {
		result = append(result, models.ItemRuleInListDeletedModel{
			GUID:     types.StringValue(rule.GUID),
			Key:      types.StringValue(rule.Key),
			Mode:     types.StringValue(rule.Mode),
			Policy:   types.StringValue(rule.Policy),
			Topology: types.StringValue(rule.Topology),
			Value:    types.StringValue(rule.Value),
		})
	}
	tflog.Info(ctx, "End flattenAffinityRuleInListDeleted")
	return result
}
// flattenDisksInListDeleted converts SDK disk info entries into the
// data-source model representation used by the deleted-computes list schema.
func flattenDisksInListDeleted(ctx context.Context, disks *compute.ListInfoDisks) []models.DiskInListDeletedModel {
	tflog.Info(ctx, "Start flattenDisksInListDeleted")
	result := make([]models.DiskInListDeletedModel, 0, len(*disks))
	for _, disk := range *disks {
		result = append(result, models.DiskInListDeletedModel{
			DiskId:  types.Int64Value(int64(disk.ID)),
			PCISlot: types.Int64Value(disk.PCISlot),
		})
	}
	tflog.Info(ctx, "End flattenDisksInListDeleted")
	return result
}
// flattenInterfaceInListDeleted converts SDK compute network interfaces into
// the data-source model representation used by the deleted-computes list
// schema. Conversion problems are logged, not propagated.
func flattenInterfaceInListDeleted(ctx context.Context, interfaces *compute.ListInterfaces) []models.ItemVNFInterfaceInListDeletedModel {
	tflog.Info(ctx, "Start flattenInterfaceInListDeleted")
	var diags diag.Diagnostics
	res := make([]models.ItemVNFInterfaceInListDeletedModel, 0, len(*interfaces))
	for _, item := range *interfaces {
		temp := models.ItemVNFInterfaceInListDeletedModel{
			ConnID:      types.Int64Value(int64(item.ConnID)),
			ConnType:    types.StringValue(item.ConnType),
			DefGW:       types.StringValue(item.DefGW),
			Enabled:     types.BoolValue(item.Enabled),
			FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
			GUID:        types.StringValue(item.GUID),
			IPAddress:   types.StringValue(item.IPAddress),
			ListenSSH:   types.BoolValue(item.ListenSSH),
			MAC:         types.StringValue(item.MAC),
			Name:        types.StringValue(item.Name),
			NetID:       types.Int64Value(int64(item.NetID)),
			NetMask:     types.Int64Value(int64(item.NetMask)),
			NetType:     types.StringValue(item.NetType),
			NodeID:      types.Int64Value(int64(item.NodeID)),
			PCISlot:     types.Int64Value(item.PCISlot),
			QOS: &models.QOSInListModel{
				ERate:   types.Int64Value(int64(item.QOS.ERate)),
				GUID:    types.StringValue(item.QOS.GUID),
				InBurst: types.Int64Value(int64(item.QOS.InBurst)),
				InRate:  types.Int64Value(int64(item.QOS.InRate)),
			},
			Target: types.StringValue(item.Target),
			Type:   types.StringValue(item.Type),
		}
		temp.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VNFs)
		// HasError is the canonical test for diag.Diagnostics: a non-nil but
		// error-free collection must not be reported as a failure.
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenInterfaceInListDeleted", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenInterfaceInListDeleted")
	return res
}
// flattenSnapSetInListDeleted converts SDK snapshot sets into the data-source
// model representation used by the deleted-computes list schema. Conversion
// problems are logged, not propagated.
func flattenSnapSetInListDeleted(ctx context.Context, snapSet *compute.ListSnapSets) []models.ItemSnapSetInListDeletedModel {
	tflog.Info(ctx, "Start flattenSnapSetInListDeleted")
	var diags diag.Diagnostics
	res := make([]models.ItemSnapSetInListDeletedModel, 0, len(*snapSet))
	for _, item := range *snapSet {
		temp := models.ItemSnapSetInListDeletedModel{
			GUID:      types.StringValue(item.GUID),
			Label:     types.StringValue(item.Label),
			Timestamp: types.Int64Value(int64(item.Timestamp)),
		}
		temp.Disks, diags = types.ListValueFrom(ctx, types.Int64Type, item.Disks)
		// HasError is the canonical test for diag.Diagnostics: a non-nil but
		// error-free collection must not be reported as a failure.
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenSnapSetInListDeleted", diags))
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenSnapSetInListDeleted")
	return res
}

View File

@@ -0,0 +1,71 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputePciDeviceList refreshes the data-source state for the PCI
// device list: it fetches the list from the platform and rebuilds *state from
// the incoming request filters plus the flattened API response.
func DataSourceComputePciDeviceList(ctx context.Context, state *models.ListPCIDevicesModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputePciDeviceList")
	diags := diag.Diagnostics{}
	// Fetch the PCI device list; an API error becomes a diagnostic.
	pciDeviceList, err := utilities.ComputePciDeviceListCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute pci device list info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	// Filter fields are carried over from the incoming state; Id, Items and
	// EntryCount are computed from the API response.
	*state = models.ListPCIDevicesModel{
		ComputeID:  state.ComputeID,
		RGID:       state.RGID,
		DevID:      state.DevID,
		Name:       state.Name,
		Status:     state.Status,
		SortBy:     state.SortBy,
		Page:       state.Page,
		Size:       state.Size,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(id.String()),
		Items:      flattenPCIDevice(ctx, pciDeviceList.Data),
		EntryCount: types.Int64Value(int64(pciDeviceList.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputePciDeviceList")
	return nil
}
// flattenPCIDevice converts SDK PCI device entries into the data-source model
// representation used by the PCI device list schema.
func flattenPCIDevice(ctx context.Context, deviceList []compute.ItemPCIDevice) []models.ItemPCIDevice {
	tflog.Info(ctx, "Start flattenPCIDevice")
	result := make([]models.ItemPCIDevice, 0, len(deviceList))
	for _, device := range deviceList {
		result = append(result, models.ItemPCIDevice{
			ComputeID:   types.Int64Value(int64(device.ComputeID)),
			Description: types.StringValue(device.Description),
			GUID:        types.Int64Value(int64(device.GUID)),
			HwPath:      types.StringValue(device.HwPath),
			ID:          types.Int64Value(int64(device.ID)),
			Name:        types.StringValue(device.Name),
			RGID:        types.Int64Value(int64(device.RGID)),
			StackID:     types.Int64Value(int64(device.StackID)),
			Status:      types.StringValue(device.Status),
			SystemName:  types.StringValue(device.SystemName),
		})
	}
	tflog.Info(ctx, "End flattenPCIDevice")
	return result
}

View File

@@ -0,0 +1,60 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputePFWList refreshes the data-source state for the compute
// port-forwarding list: it fetches the list from the platform and rebuilds
// *state from the request fields plus the flattened API response.
func DataSourceComputePFWList(ctx context.Context, state *models.ListPFWsModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputePFWList")
	diags := diag.Diagnostics{}
	// Fetch the PFW list; an API error becomes a diagnostic.
	pfwList, err := utilities.ComputePFWListCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute pfw list info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	*state = models.ListPFWsModel{
		ComputeID:  state.ComputeID,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(id.String()),
		Items:      flattenItemsPFWList(ctx, pfwList),
		EntryCount: types.Int64Value(int64(pfwList.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputePFWList")
	return nil
}
// flattenItemsPFWList converts SDK port-forwarding rules into the data-source
// model representation used by the PFW list schema.
func flattenItemsPFWList(ctx context.Context, pfw *compute.ListPFWs) []models.ItemPFWModel {
	tflog.Info(ctx, "Start flattenItemsPFWList")
	result := make([]models.ItemPFWModel, 0, len(pfw.Data))
	for _, rule := range pfw.Data {
		result = append(result, models.ItemPFWModel{
			PFWId:           types.Int64Value(int64(rule.ID)),
			LocalIP:         types.StringValue(rule.LocalIP),
			LocalPort:       types.Int64Value(int64(rule.LocalPort)),
			Protocol:        types.StringValue(rule.Protocol),
			PublicPortEnd:   types.Int64Value(int64(rule.PublicPortEnd)),
			PublicPortStart: types.Int64Value(int64(rule.PublicPortStart)),
			VMID:            types.Int64Value(int64(rule.VMID)),
		})
	}
	tflog.Info(ctx, "End flattenItemsPFWList")
	return result
}

View File

@@ -0,0 +1,57 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeSnapshotUsage refreshes the data-source state for compute
// snapshot usage: it fetches the usage list from the platform and rebuilds
// *state from the request fields plus the flattened API response.
func DataSourceComputeSnapshotUsage(ctx context.Context, state *models.ListSnapShotsModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeSnapshotUsage")
	diags := diag.Diagnostics{}
	// Fetch snapshot usage; an API error becomes a diagnostic.
	snapshotUsage, err := utilities.ComputeSnapshotUsageCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute snapshot usage info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	*state = models.ListSnapShotsModel{
		ComputeID: state.ComputeID,
		Label:     state.Label,
		Timeouts:  state.Timeouts,
		Id:        types.StringValue(id.String()),
		Items:     flattenItemsSnapshotUsage(ctx, snapshotUsage),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeSnapshotUsage")
	return nil
}
// flattenItemsSnapshotUsage converts SDK snapshot usage entries into the
// data-source model representation used by the snapshot-usage schema.
func flattenItemsSnapshotUsage(ctx context.Context, snapshot *compute.ListUsageSnapshots) []models.ItemUsageSnapshotModel {
	tflog.Info(ctx, "Start flattenItemsSnapshotUsage")
	result := make([]models.ItemUsageSnapshotModel, 0, len(*snapshot))
	for _, usage := range *snapshot {
		result = append(result, models.ItemUsageSnapshotModel{
			Count:     types.Int64Value(int64(usage.Count)),
			Stored:    types.Float64Value(float64(usage.Stored)),
			Label:     types.StringValue(usage.Label),
			Timestamp: types.Int64Value(int64(usage.Timestamp)),
		})
	}
	tflog.Info(ctx, "End flattenItemsSnapshotUsage")
	return result
}

View File

@@ -0,0 +1,72 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeUserList refreshes the data-source state for the compute
// user list: it fetches the ACL-grouped user list from the platform and
// rebuilds *state from the request fields plus the flattened API response.
func DataSourceComputeUserList(ctx context.Context, state *models.ListUsersModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeUserList")
	diags := diag.Diagnostics{}
	// Fetch the user list; an API error becomes a diagnostic.
	userList, err := utilities.ComputeUserListCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute user list info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	*state = models.ListUsersModel{
		ComputeID:  state.ComputeID,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(id.String()),
		Items:      flattenUserList(ctx, userList),
		EntryCount: types.Int64Value(int64(userList.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeUserList")
	return nil
}
// flattenUserList builds the user-list ACL record from the SDK response,
// flattening the account, compute and resource-group ACL collections.
func flattenUserList(ctx context.Context, users *compute.ListUsers) *models.RecordACLInListUsersModel {
	tflog.Info(ctx, "Start flattenUserList")
	record := &models.RecordACLInListUsersModel{
		AccountACL: flattenACLInUserList(ctx, &users.Data.AccountACL),
		ComputeACL: flattenACLInUserList(ctx, &users.Data.ComputeACL),
		RGACL:      flattenACLInUserList(ctx, &users.Data.RGACL),
	}
	tflog.Info(ctx, "End flattenUserList")
	return record
}
// flattenACLInUserList converts an SDK ACL list into the data-source model
// representation used by the compute user-list schema.
func flattenACLInUserList(ctx context.Context, acl *compute.ListACL) []models.ItemACLInListUsersModel {
	tflog.Info(ctx, "Start flattenACLInUserList")
	result := make([]models.ItemACLInListUsersModel, 0, len(*acl))
	for _, aclItem := range *acl {
		result = append(result, models.ItemACLInListUsersModel{
			Explicit:    types.BoolValue(bool(aclItem.Explicit)),
			GUID:        types.StringValue(aclItem.GUID),
			Right:       types.StringValue(aclItem.Right),
			Status:      types.StringValue(aclItem.Status),
			Type:        types.StringValue(aclItem.Type),
			UserGroupID: types.StringValue(aclItem.UserGroupID),
		})
	}
	tflog.Info(ctx, "End flattenACLInUserList")
	return result
}

View File

@@ -0,0 +1,78 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// DataSourceComputeVGPUList refreshes the data-source state for the compute
// vGPU list: it fetches the list from the platform and rebuilds *state from
// the incoming request filters plus the flattened API response.
func DataSourceComputeVGPUList(ctx context.Context, state *models.ListVGPUsModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start FlattenDataSourceComputeVGPUList")
	diags := diag.Diagnostics{}
	// Fetch the vGPU list; an API error becomes a diagnostic.
	vgpuList, err := utilities.ComputeVGPUListCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Error get compute vgpu list info", err.Error())
		return diags
	}
	// A fresh UUID serves as the synthetic data-source ID on every read.
	id := uuid.New()
	// Filter fields are carried over from the incoming state; Id, Items and
	// EntryCount are computed from the API response.
	*state = models.ListVGPUsModel{
		ComputeID:      state.ComputeID,
		GPUID:          state.GPUID,
		Type:           state.Type,
		Status:         state.Status,
		SortBy:         state.SortBy,
		Page:           state.Page,
		Size:           state.Size,
		IncludeDeleted: state.IncludeDeleted,
		Timeouts:       state.Timeouts,
		Id:             types.StringValue(id.String()),
		Items:          flattenVGPU(ctx, vgpuList.Data),
		EntryCount:     types.Int64Value(int64(vgpuList.EntryCount)),
	}
	tflog.Info(ctx, "End FlattenDataSourceComputeVGPUList")
	return nil
}
// flattenVGPU converts SDK vGPU entries into the data-source model
// representation used by the vGPU list schema.
func flattenVGPU(ctx context.Context, vgpuList []compute.ItemVGPU) []models.ItemVGPU {
	// Log messages previously said "flattenVGPUItems", which does not match
	// this function's name and made traces misleading.
	tflog.Info(ctx, "Start flattenVGPU")
	res := make([]models.ItemVGPU, 0, len(vgpuList))
	for _, item := range vgpuList {
		temp := models.ItemVGPU{
			AccountID:      types.Int64Value(int64(item.AccountID)),
			CreatedTime:    types.Int64Value(int64(item.CreatedTime)),
			DeletedTime:    types.Int64Value(int64(item.DeletedTime)),
			GID:            types.Int64Value(int64(item.GID)),
			GUID:           types.Int64Value(int64(item.GUID)),
			ID:             types.Int64Value(int64(item.ID)),
			LastClaimedBy:  types.Int64Value(int64(item.LastClaimedBy)),
			LastUpdateTime: types.Int64Value(int64(item.LastUpdateTime)),
			Mode:           types.StringValue(item.Mode),
			PCISlot:        types.Int64Value(int64(item.PCISlot)),
			PGPUID:         types.Int64Value(int64(item.PGPUID)),
			ProfileID:      types.Int64Value(int64(item.ProfileID)),
			RAM:            types.Int64Value(int64(item.RAM)),
			ReferenceID:    types.StringValue(item.ReferenceID),
			RGID:           types.Int64Value(int64(item.RGID)),
			Status:         types.StringValue(item.Status),
			Type:           types.StringValue(item.Type),
			VMID:           types.Int64Value(int64(item.VMID)),
		}
		res = append(res, temp)
	}
	tflog.Info(ctx, "End flattenVGPU")
	return res
}

View File

@@ -0,0 +1,494 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
disks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// ComputeResource refreshes the resource state after an apply/read: it fetches
// the compute record from the platform and rebuilds *plan, carrying over
// user-configured fields from the incoming plan and filling computed fields
// from the API response.
func ComputeResource(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.ComputeResource")
	diags := diag.Diagnostics{}
	// Fetch the compute record; presence-check failures come back as diagnostics.
	recordItemCompute, diags := utilities.ComputeResourceCheckPresence(ctx, plan, c)
	if diags.HasError() {
		return diags
	}
	// Locate the boot disk (type "B"); several fields below (size, SEP, pool)
	// are derived from it.
	bootdisk := findBootDisk(recordItemCompute.Disks)
	// Free-form structures are stored in state as their JSON encoding.
	devices, _ := json.Marshal(recordItemCompute.Devices)
	userdata, _ := json.Marshal(recordItemCompute.Userdata)
	customFields, _ := json.Marshal(recordItemCompute.CustomFields)
	*plan = models.ResourceComputeModel{
		// required fields
		Name:     plan.Name,
		RGID:     plan.RGID,
		Driver:   plan.Driver,
		CPU:      plan.CPU,
		RAM:      plan.RAM,
		ID:       plan.ID,
		Timeouts: plan.Timeouts,
		// optional fields
		ImageID:         plan.ImageID,
		WithoutBootDisk: plan.WithoutBootDisk,
		// we intentionally use the SizeMax field, do not change it until the BootDiskSize field is fixed on the platform
		BootDiskSize:      types.Int64Value(int64(bootdisk.SizeMax)),
		AffinityLabel:     types.StringValue(recordItemCompute.AffinityLabel),
		AffinityRules:     plan.AffinityRules,
		AntiAffinityRules: plan.AntiAffinityRules,
		CustomFields:      types.StringValue(string(customFields)),
		Stateless:         plan.Stateless,
		SepId:             types.Int64Value(int64(bootdisk.SepID)),
		Pool:              types.StringValue(bootdisk.Pool),
		ExtraDisks:        plan.ExtraDisks,
		Network:           flattenNetwork(ctx, &recordItemCompute.Interfaces),
		Tags:              plan.Tags,
		PortForwarding:    plan.PortForwarding,
		UserAccess:        plan.UserAccess,
		Snapshot:          plan.Snapshot,
		Rollback:          plan.Rollback,
		CD:                plan.CD,
		PinToStack:        plan.PinToStack,
		Description:       types.StringValue(recordItemCompute.Description),
		CloudInit:         plan.CloudInit,
		Enabled:           plan.Enabled,
		Pause:             plan.Pause,
		Reset:             plan.Reset,
		Restore:           plan.Restore,
		AutoStart:         plan.AutoStart,
		ForceStop:         plan.ForceStop,
		ForceResize:       plan.ForceResize,
		DataDisks:         plan.DataDisks,
		Started:           plan.Started,
		DetachDisks:       plan.DetachDisks,
		Permanently:       plan.Permanently,
		IS:                plan.IS,
		IpaType:           plan.IpaType,
		NumaAffinity:      plan.NumaAffinity,
		CPUPin:            plan.CPUPin,
		HPBacked:          plan.HPBacked,
		//computed fields
		AccountId:              types.Int64Value(int64(recordItemCompute.AccountID)),
		AccountName:            types.StringValue(recordItemCompute.AccountName),
		ACL:                    flattenResourceACL(ctx, &recordItemCompute.ACL),
		AffinityWeight:         types.Int64Value(int64(recordItemCompute.AffinityWeight)),
		Architecture:           types.StringValue(recordItemCompute.Architecture),
		BootOrder:              flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordItemCompute.BootOrder),
		BootDisk:               flattenDisk(ctx, bootdisk),
		BootDiskId:             types.Int64Value(int64(bootdisk.ID)),
		CdImageId:              types.Int64Value(int64(recordItemCompute.CdImageId)),
		CloneReference:         types.Int64Value(int64(recordItemCompute.CloneReference)),
		Clones:                 flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordItemCompute.Clones),
		ComputeCIID:            types.Int64Value(int64(recordItemCompute.ComputeCIID)),
		ComputeId:              types.Int64Value(int64(recordItemCompute.ID)),
		CreatedBy:              types.StringValue(recordItemCompute.CreatedBy),
		CreatedTime:            types.Int64Value(int64(recordItemCompute.CreatedTime)),
		DeletedBy:              types.StringValue(recordItemCompute.DeletedBy),
		DeletedTime:            types.Int64Value(int64(recordItemCompute.DeletedTime)),
		Devices:                types.StringValue(string(devices)),
		Disks:                  flattenResourceDisks(ctx, &recordItemCompute.Disks),
		GID:                    types.Int64Value(int64(recordItemCompute.GID)),
		GUID:                   types.Int64Value(int64(recordItemCompute.GUID)),
		ImageName:              types.StringValue(recordItemCompute.ImageName),
		Interfaces:             flattenResourceInterfaces(ctx, &recordItemCompute.Interfaces),
		LockStatus:             types.StringValue(recordItemCompute.LockStatus),
		ManagerID:              types.Int64Value(int64(recordItemCompute.ManagerID)),
		ManagerType:            types.StringValue(recordItemCompute.ManagerType),
		MigrationJob:           types.Int64Value(int64(recordItemCompute.MigrationJob)),
		Milestones:             types.Int64Value(int64(recordItemCompute.Milestones)),
		NatableVINSID:          types.Int64Value(int64(recordItemCompute.NatableVINSID)),
		NatableVINSIP:          types.StringValue(recordItemCompute.NatableVINSIP),
		NatableVINSName:        types.StringValue(recordItemCompute.NatableVINSName),
		NatableVINSNetwork:     types.StringValue(recordItemCompute.NatableVINSNetwork),
		NatableVINSNetworkName: types.StringValue(recordItemCompute.NatableVINSNetworkName),
		NeedReboot:             types.BoolValue(recordItemCompute.NeedReboot),
		NumaNodeId:             types.Int64Value(int64(recordItemCompute.NumaNodeId)),
		OSUsers:                flattenResourceOSUsers(ctx, &recordItemCompute.OSUsers),
		Pinned:                 types.BoolValue(recordItemCompute.Pinned),
		ReferenceID:            types.StringValue(recordItemCompute.ReferenceID),
		Registered:             types.BoolValue(recordItemCompute.Registered),
		ResName:                types.StringValue(recordItemCompute.ResName),
		ReservedNodeCpus:       flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordItemCompute.ReservedNodeCpus),
		RGName:                 types.StringValue(recordItemCompute.RGName),
		SnapSets:               flattenSnapSets(ctx, &recordItemCompute.SnapSets),
		StatelessSepID:         types.Int64Value(int64(recordItemCompute.StatelessSepID)),
		StatelessSepType:       types.StringValue(recordItemCompute.StatelessSepType),
		Status:                 types.StringValue(recordItemCompute.Status),
		TechStatus:             types.StringValue(recordItemCompute.TechStatus),
		UpdatedBy:              types.StringValue(recordItemCompute.UpdatedBy),
		UpdatedTime:            types.Int64Value(int64(recordItemCompute.UpdatedTime)),
		UserManaged:            types.BoolValue(recordItemCompute.UserManaged),
		Userdata:               types.StringValue(string(userdata)),
		VGPUs:                  flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordItemCompute.VGPUs),
		VirtualImageID:         types.Int64Value(int64(recordItemCompute.VirtualImageID)),
		VirtualImageName:       types.StringValue(recordItemCompute.VirtualImageName),
	}
	tflog.Info(ctx, "End flattens.ComputeResource", map[string]any{"id": plan.ID.ValueString()})
	return nil
}
// findBootDisk returns the first boot disk (type "B") from the list, or a
// pointer to a zero-value disk when the compute has no boot disk attached.
func findBootDisk(disks compute.ListComputeDisks) *compute.ItemComputeDisk {
	for i := range disks {
		if disks[i].Type == "B" {
			return &disks[i]
		}
	}
	return &compute.ItemComputeDisk{}
}
// flattenResourceACL converts the SDK ACL record into a framework Object
// holding the account, compute and resource-group ACL lists.
// Conversion problems are logged, not propagated.
func flattenResourceACL(ctx context.Context, acl *compute.RecordACL) types.Object {
	tflog.Info(ctx, "Start flattenACL")
	temp := models.RecordResourceACLModel{
		AccountACL: flattenACLItems(ctx, &acl.AccountACL),
		ComputeACL: flattenACLItems(ctx, &acl.ComputeACL),
		RGACL:      flattenACLItems(ctx, &acl.RGACL),
	}
	// ObjectValueFrom returns diag.Diagnostics (was misleadingly named "err");
	// HasError is the canonical test for it.
	res, diags := types.ObjectValueFrom(ctx, models.ListACL, temp)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenACL struct to obj", diags))
	}
	tflog.Info(ctx, "End flattenACL")
	return res
}
// flattenACLItems converts an SDK ACL list into a framework List of ACL
// objects. Conversion problems are logged, not propagated.
func flattenACLItems(ctx context.Context, item *compute.ListACL) types.List {
	tflog.Info(ctx, "Start flattenACLItems")
	tempSlice := make([]types.Object, 0, len(*item))
	for _, aclItem := range *item {
		temp := models.ItemACLModel{
			Explicit:    types.BoolValue(bool(aclItem.Explicit)),
			GUID:        types.StringValue(aclItem.GUID),
			Right:       types.StringValue(aclItem.Right),
			Status:      types.StringValue(aclItem.Status),
			Type:        types.StringValue(aclItem.Type),
			UserGroupID: types.StringValue(aclItem.UserGroupID),
		}
		// ObjectValueFrom returns diag.Diagnostics (was misleadingly named
		// "err"); HasError is the canonical test for it.
		obj, diags := types.ObjectValueFrom(ctx, models.ItemACL, temp)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenACLItems struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACL}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenACLItems", diags))
	}
	tflog.Info(ctx, "End flattenACLItems")
	return res
}
// flattenDisk converts a single compute disk into a Terraform object value
// described by models.ItemDisk. Conversion diagnostics are logged and the
// resulting object is returned as-is.
func flattenDisk(ctx context.Context, disk *compute.ItemComputeDisk) types.Object {
	tflog.Info(ctx, fmt.Sprintf("flattenDisk: start flatten disk with ID - %v", disk.ID))
	// The ACL structure is exposed to Terraform as its raw JSON text.
	// NOTE(review): the marshal error is silently ignored — an unmarshalable
	// ACL would yield an empty string here; confirm this is intended.
	acl, _ := json.Marshal(disk.ACL)
	temp := models.ItemResourceDiskModel{
		CKey:                types.StringValue(disk.CKey),
		ACL:                 types.StringValue(string(acl)),
		AccountID:           types.Int64Value(int64(disk.AccountID)),
		BootPartition:       types.Int64Value(int64(disk.BootPartition)),
		CreatedTime:         types.Int64Value(int64(disk.CreatedTime)),
		DeletedTime:         types.Int64Value(int64(disk.DeletedTime)),
		Description:         types.StringValue(disk.Description),
		DestructionTime:     types.Int64Value(int64(disk.DestructionTime)),
		DiskPath:            types.StringValue(disk.DiskPath),
		GID:                 types.Int64Value(int64(disk.GID)),
		GUID:                types.Int64Value(int64(disk.GUID)),
		ID:                  types.Int64Value(int64(disk.ID)),
		ImageID:             types.Int64Value(int64(disk.ImageID)),
		Images:              flattens.FlattenSimpleTypeToList(ctx, types.StringType, disk.Images),
		IOTune:              flattensIOTune(ctx, &disk.IOTune),
		IQN:                 types.StringValue(disk.IQN),
		Login:               types.StringValue(disk.Login),
		Milestones:          types.Int64Value(int64(disk.Milestones)),
		Name:                types.StringValue(disk.Name),
		Order:               types.Int64Value(int64(disk.Order)),
		Params:              types.StringValue(disk.Params),
		ParentID:            types.Int64Value(int64(disk.ParentID)),
		Passwd:              types.StringValue(disk.Passwd),
		Pool:                types.StringValue(disk.Pool),
		PCISlot:             types.Int64Value(disk.PCISlot),
		PresentTo:           flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, disk.PresentTo),
		PurgeTime:           types.Int64Value(int64(disk.PurgeTime)),
		RealityDeviceNumber: types.Int64Value(int64(disk.RealityDeviceNumber)),
		Replication:         flattenDiskReplication(ctx, &disk.Replication),
		ResID:               types.StringValue(disk.ResID),
		Role:                types.StringValue(disk.Role),
		SepID:               types.Int64Value(int64(disk.SepID)),
		Shareable:           types.BoolValue(disk.Shareable),
		SizeMax:             types.Int64Value(int64(disk.SizeMax)),
		SizeUsed:            types.Float64Value(disk.SizeUsed),
		Snapshots:           flattenResourceSnapshotExtend(ctx, &disk.Snapshots),
		Status:              types.StringValue(disk.Status),
		TechStatus:          types.StringValue(disk.TechStatus),
		Type:                types.StringValue(disk.Type),
		VMID:                types.Int64Value(int64(disk.VMID)),
	}
	res, err := types.ObjectValueFrom(ctx, models.ItemDisk, temp)
	if err != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenDisk struct to obj", err))
	}
	tflog.Info(ctx, fmt.Sprintf("flattenDisk: end flatten disk with ID - %v", disk.ID))
	return res
}
// flattensIOTune converts disk I/O tuning limits into a Terraform object
// value described by disks.ItemIOTune. Conversion diagnostics are logged
// and the resulting object is returned as-is.
func flattensIOTune(ctx context.Context, ioTune *compute.IOTune) types.Object {
	tflog.Info(ctx, "Start flattensIOTune")
	model := models.IOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(ioTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(ioTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(ioTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(ioTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(ioTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(ioTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(ioTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(ioTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(ioTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(ioTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(ioTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(ioTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(ioTune.WriteIOPSSecMax)),
	}
	obj, diags := types.ObjectValueFrom(ctx, disks.ItemIOTune, model)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattensIOTune struct to obj", diags))
	}
	tflog.Info(ctx, "End flattensIOTune")
	return obj
}
// flattenDiskReplication converts disk replication details into a Terraform
// object value described by models.ItemReplication. Conversion diagnostics
// are logged and the resulting object is returned as-is.
func flattenDiskReplication(ctx context.Context, replication *compute.ItemReplication) types.Object {
	tflog.Info(ctx, "Start flattenDiskReplication")
	model := models.ReplicationModel{
		DiskID:       types.Int64Value(int64(replication.DiskID)),
		PoolID:       types.StringValue(replication.PoolID),
		Role:         types.StringValue(replication.Role),
		SelfVolumeID: types.StringValue(replication.SelfVolumeID),
		StorageID:    types.StringValue(replication.StorageID),
		VolumeID:     types.StringValue(replication.VolumeID),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ItemReplication, model)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenDiskReplication struct to obj", diags))
	}
	tflog.Info(ctx, "End flattenDiskReplication")
	return obj
}
// flattenResourceSnapshotExtend converts the snapshots of a disk into a
// Terraform list of objects described by models.ItemSnapshot. Conversion
// diagnostics are logged and the resulting list is returned as-is.
func flattenResourceSnapshotExtend(ctx context.Context, snapshotList *compute.SnapshotExtendList) types.List {
	tflog.Info(ctx, "Start flattenResourceSnapshotExtend")
	objects := make([]types.Object, 0, len(*snapshotList))
	for _, snap := range *snapshotList {
		model := models.ItemSnapshotExtendModel{
			GUID:        types.StringValue(snap.GUID),
			Label:       types.StringValue(snap.Label),
			ReferenceID: types.StringValue(snap.ReferenceID),
			ResID:       types.StringValue(snap.ResID),
			SnapSetGUID: types.StringValue(snap.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(snap.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(snap.TimeStamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, model)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenResourceSnapshotExtend struct to obj", diags))
		}
		objects = append(objects, obj)
	}
	list, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, objects)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenResourceSnapshotExtend", diags))
	}
	tflog.Info(ctx, "End flattenResourceSnapshotExtend")
	return list
}
// flattenResourceOSUsers converts the guest OS user credentials of a compute
// into a Terraform list of objects described by models.ItemOSUsers.
// Conversion diagnostics are logged and the resulting list is returned as-is.
func flattenResourceOSUsers(ctx context.Context, osUsersList *compute.ListOSUser) types.List {
	tflog.Info(ctx, "Start flattensOSUsers")
	objects := make([]types.Object, 0, len(*osUsersList))
	for _, user := range *osUsersList {
		model := models.ItemOSUserModel{
			GUID:     types.StringValue(user.GUID),
			Login:    types.StringValue(user.Login),
			Password: types.StringValue(user.Password),
			PubKey:   types.StringValue(user.PubKey),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemOSUsers, model)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattensOSUsers struct to obj", diags))
		}
		objects = append(objects, obj)
	}
	list, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemOSUsers}, objects)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattensOSUsers", diags))
	}
	tflog.Info(ctx, "End flattensOSUsers")
	return list
}
// flattenResourceDisks converts all non-boot disks of a compute into a
// Terraform list of objects described by models.ItemDisk. Disks of type "B"
// (boot) are skipped because the boot disk is exposed separately. Conversion
// diagnostics are logged and the resulting list is returned as-is.
func flattenResourceDisks(ctx context.Context, disksList *compute.ListComputeDisks) types.List {
	tflog.Info(ctx, "Start flattenResourceDisks")
	tempSlice := make([]types.Object, 0, len(*disksList))
	for _, disk := range *disksList {
		if disk.Type == "B" {
			// boot disk is flattened elsewhere
			continue
		}
		// flattenDisk already returns a ready types.Object; the previous
		// code re-wrapped it through another ObjectValueFrom call, which
		// was redundant.
		tempSlice = append(tempSlice, flattenDisk(ctx, &disk))
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemDisk}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenResourceDisks", diags))
	}
	tflog.Info(ctx, "End flattenResourceDisks")
	return res
}
// flattenResourceInterfaces converts the network interfaces of a compute into
// a Terraform list of objects described by models.ItemInterfaces. Conversion
// diagnostics are logged and the resulting list is returned as-is.
func flattenResourceInterfaces(ctx context.Context, interfaces *compute.ListInterfaces) types.List {
	tflog.Info(ctx, "Start flattenResourceInterfaces")
	tempSlice := make([]types.Object, 0, len(*interfaces))
	for _, item := range *interfaces {
		temp := models.ItemResourceInterfacesModel{
			ConnID:      types.Int64Value(int64(item.ConnID)),
			ConnType:    types.StringValue(item.ConnType),
			GetGW:       types.StringValue(item.DefGW),
			Enabled:     types.BoolValue(item.Enabled),
			FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
			GUID:        types.StringValue(item.GUID),
			IPAddress:   types.StringValue(item.IPAddress),
			ListenSSH:   types.BoolValue(item.ListenSSH),
			MAC:         types.StringValue(item.MAC),
			Name:        types.StringValue(item.Name),
			NetID:       types.Int64Value(int64(item.NetID)),
			NetMask:     types.Int64Value(int64(item.NetMask)),
			NetType:     types.StringValue(item.NetType),
			NodeID:      types.Int64Value(int64(item.NodeID)),
			PCISlot:     types.Int64Value(item.PCISlot),
			QOS:         flattenQOS(ctx, &item.QOS),
			Target:      types.StringValue(item.Target),
			Type:        types.StringValue(item.Type),
			VNFs:        flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VNFs),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemInterfaces, temp)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenResourceInterfaces struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemInterfaces}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenResourceInterfaces", diags))
	}
	// The closing log previously said "End flattenInterfaces", which did not
	// match the start marker; keep the markers consistent.
	tflog.Info(ctx, "End flattenResourceInterfaces")
	return res
}
// flattenQOS converts interface QoS settings into a Terraform object value
// described by models.ItemQos. Conversion diagnostics are logged and the
// resulting object is returned as-is.
func flattenQOS(ctx context.Context, QOS *compute.QOS) types.Object {
	tflog.Info(ctx, "Start flattenQOS")
	model := models.QOSModel{
		ERate:   types.Int64Value(int64(QOS.ERate)),
		GUID:    types.StringValue(QOS.GUID),
		InBurst: types.Int64Value(int64(QOS.InBurst)),
		InRate:  types.Int64Value(int64(QOS.InRate)),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ItemQos, model)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenQOS struct to obj", diags))
	}
	tflog.Info(ctx, "End flattenQOS")
	return obj
}
// flattenSnapSets converts the snapshot sets of a compute into a Terraform
// list of objects described by models.ItemSnapSets. Conversion diagnostics
// are logged and the resulting list is returned as-is.
func flattenSnapSets(ctx context.Context, snapSets *compute.ListSnapSets) types.List {
	tflog.Info(ctx, "Start flattenSnapSets")
	objects := make([]types.Object, 0, len(*snapSets))
	for _, set := range *snapSets {
		model := models.ItemSnapSetModel{
			Disks:     flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, set.Disks),
			GUID:      types.StringValue(set.GUID),
			Label:     types.StringValue(set.Label),
			Timestamp: types.Int64Value(int64(set.Timestamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapSets, model)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenSnapSets struct to obj", diags))
		}
		objects = append(objects, obj)
	}
	list, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapSets}, objects)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenSnapSets", diags))
	}
	tflog.Info(ctx, "End flattenSnapSets")
	return list
}
// flattenNetwork converts the compute interfaces into the compact Terraform
// network set (net type/id, IP and MAC) described by models.ItemNetwork.
// Conversion diagnostics are logged and the resulting set is returned as-is.
func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) types.Set {
	tflog.Info(ctx, "Start flattenNetwork")
	objects := make([]types.Object, 0, len(*interfaces))
	for _, iface := range *interfaces {
		model := models.ItemNetworkModel{
			NetType:   types.StringValue(iface.NetType),
			NetId:     types.Int64Value(int64(iface.NetID)),
			IpAddress: types.StringValue(iface.IPAddress),
			Mac:       types.StringValue(iface.MAC),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemNetwork, model)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenNetwork struct to obj", diags))
		}
		objects = append(objects, obj)
	}
	set, diags := types.SetValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemNetwork}, objects)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenNetwork", diags))
	}
	tflog.Info(ctx, "End flattenNetwork")
	return set
}

View File

@@ -0,0 +1,83 @@
package kvmvm
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/ic"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// resourceComputeInputChecks validates the references in a compute plan
// before any API calls are made: the resource group, the boot image (unless
// the compute is created without a boot disk), every attached network, and
// the pool/sep_id/boot-disk interdependencies. Every problem found is
// collected into the returned diagnostics.
func resourceComputeInputChecks(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	rgID := uint64(plan.RGID.ValueInt64())
	tflog.Info(ctx, "resourceComputeInputChecks: exist resource rg", map[string]any{"rg_id": rgID})
	if err := ic.ExistRG(ctx, rgID, c); err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about rg with ID %v", rgID), err.Error())
	}

	// The boot image is only required (and only checked) when a boot disk
	// will be created.
	if plan.WithoutBootDisk.IsNull() || !plan.WithoutBootDisk.ValueBool() {
		if plan.ImageID.IsNull() {
			// fmt.Sprintf was redundant here: the message contains no verbs.
			diags.AddError("imageId must be specified if the compute with boot disk is to be created", "")
		} else {
			imageID := uint64(plan.ImageID.ValueInt64())
			tflog.Info(ctx, "resourceComputeInputChecks: exist image", map[string]any{"image_id": imageID})
			// Only look the image up when an ID was actually supplied;
			// previously a null image_id still triggered a lookup for ID 0
			// and produced a second, confusing error.
			if err := ic.ExistImage(ctx, imageID, c); err != nil {
				diags.AddError(fmt.Sprintf("Cannot get info about image with ID %v", imageID), err.Error())
			}
		}
	}

	if !plan.Network.IsNull() {
		for _, elem := range plan.Network.Elements() {
			elemMap := elem.(types.Object).Attributes()
			netType := strings.ToUpper(elemMap["net_type"].(types.String).ValueString())
			switch netType {
			case "VINS":
				vinsId := uint64(elemMap["net_id"].(types.Int64).ValueInt64())
				if err := ic.ExistVins(ctx, vinsId, c); err != nil {
					diags.AddError(fmt.Sprintf("Cannot create compute because vins ID %d is not allowed or does not exist", vinsId), err.Error())
				}
			case "EXTNET":
				extNetId := uint64(elemMap["net_id"].(types.Int64).ValueInt64())
				if err := ic.ExistExtNet(ctx, extNetId, c); err != nil {
					diags.AddError(fmt.Sprintf("Cannot create compute because extnet ID %d is not allowed or does not exist", extNetId), err.Error())
				}
			case "VFNIC":
				// VFNIC is unsupported by the PPC driver regardless of the pool.
				if strings.EqualFold(plan.Driver.ValueString(), "KVM_PPC") {
					diags.AddError("can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'", "")
				}
				vfpoolId := uint64(elemMap["net_id"].(types.Int64).ValueInt64())
				if err := ic.ExistVFPool(ctx, vfpoolId, c); err != nil {
					diags.AddError(fmt.Sprintf("Cannot create compute because vfpool ID %d is not allowed or does not exist", vfpoolId), err.Error())
				}
			}
		}
	}

	if !plan.Pool.IsUnknown() && plan.SepId.IsUnknown() {
		diags.AddError("sep_id required if pool name enable", "")
	}
	if plan.WithoutBootDisk.ValueBool() && (!plan.BootDiskSize.IsUnknown() || !plan.Pool.IsUnknown() || !plan.SepId.IsUnknown()) {
		diags.AddError("pool, boot_disk_size and sep_id should be empty if without_boot_disk is true", "")
	}
	return diags
}

View File

@@ -0,0 +1,227 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// RecordComputeModel is the data-source model for a single compute: the
// request identifiers plus every attribute returned by the platform.
type RecordComputeModel struct {
	// request fields
	ComputeId types.Int64 `tfsdk:"compute_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	ACL *RecordACLModel `tfsdk:"acl"`
	AccountID types.Int64 `tfsdk:"account_id"`
	AccountName types.String `tfsdk:"account_name"`
	AffinityLabel types.String `tfsdk:"affinity_label"`
	AffinityRules []ItemRuleModel `tfsdk:"affinity_rules"`
	AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
	AntiAffinityRules []ItemRuleModel `tfsdk:"anti_affinity_rules"`
	Architecture types.String `tfsdk:"arch"`
	BootOrder types.List `tfsdk:"boot_order"`
	BootDiskSize types.Int64 `tfsdk:"bootdisk_size"`
	CdImageId types.Int64 `tfsdk:"cd_image_id"`
	CloneReference types.Int64 `tfsdk:"clone_reference"`
	Clones types.List `tfsdk:"clones"`
	ComputeCIID types.Int64 `tfsdk:"computeci_id"`
	CPU types.Int64 `tfsdk:"cpus"`
	CPUPin types.Bool `tfsdk:"cpu_pin"`
	CreatedBy types.String `tfsdk:"created_by"`
	CreatedTime types.Int64 `tfsdk:"created_time"`
	CustomFields types.String `tfsdk:"custom_fields"`
	DeletedBy types.String `tfsdk:"deleted_by"`
	DeletedTime types.Int64 `tfsdk:"deleted_time"`
	Description types.String `tfsdk:"desc"`
	Devices types.String `tfsdk:"devices"`
	Disks []ItemDiskModel `tfsdk:"disks"`
	Driver types.String `tfsdk:"driver"`
	GID types.Int64 `tfsdk:"gid"`
	GUID types.Int64 `tfsdk:"guid"`
	HPBacked types.Bool `tfsdk:"hp_backed"`
	ImageID types.Int64 `tfsdk:"image_id"`
	ImageName types.String `tfsdk:"image_name"`
	Interfaces []ItemInterfaceModel `tfsdk:"interfaces"`
	LockStatus types.String `tfsdk:"lock_status"`
	ManagerID types.Int64 `tfsdk:"manager_id"`
	ManagerType types.String `tfsdk:"manager_type"`
	MigrationJob types.Int64 `tfsdk:"migrationjob"`
	Milestones types.Int64 `tfsdk:"milestones"`
	Name types.String `tfsdk:"name"`
	NatableVINSID types.Int64 `tfsdk:"natable_vins_id"`
	NatableVINSIP types.String `tfsdk:"natable_vins_ip"`
	NatableVINSName types.String `tfsdk:"natable_vins_name"`
	NatableVINSNetwork types.String `tfsdk:"natable_vins_network"`
	NatableVINSNetworkName types.String `tfsdk:"natable_vins_network_name"`
	NeedReboot types.Bool `tfsdk:"need_reboot"`
	NumaAffinity types.String `tfsdk:"numa_affinity"`
	NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
	OSUsers []ItemOSUserModel `tfsdk:"os_users"`
	Pinned types.Bool `tfsdk:"pinned"`
	RAM types.Int64 `tfsdk:"ram"`
	ReferenceID types.String `tfsdk:"reference_id"`
	Registered types.Bool `tfsdk:"registered"`
	ResName types.String `tfsdk:"res_name"`
	ReservedNodeCpus types.List `tfsdk:"reserved_node_cpus"`
	RGID types.Int64 `tfsdk:"rg_id"`
	RGName types.String `tfsdk:"rg_name"`
	SnapSets []ItemSnapSetModel `tfsdk:"snap_sets"`
	StatelessSepID types.Int64 `tfsdk:"stateless_sep_id"`
	StatelessSepType types.String `tfsdk:"stateless_sep_type"`
	Status types.String `tfsdk:"status"`
	Tags types.Map `tfsdk:"tags"`
	TechStatus types.String `tfsdk:"tech_status"`
	UpdatedBy types.String `tfsdk:"updated_by"`
	UpdatedTime types.Int64 `tfsdk:"updated_time"`
	UserManaged types.Bool `tfsdk:"user_managed"`
	Userdata types.String `tfsdk:"user_data"`
	VGPUs types.List `tfsdk:"vgpus"`
	VirtualImageID types.Int64 `tfsdk:"virtual_image_id"`
	VirtualImageName types.String `tfsdk:"virtual_image_name"`
}

// RecordACLModel groups the ACL entries of a compute by scope
// (account, compute, resource group).
type RecordACLModel struct {
	AccountACL []ItemACLModel `tfsdk:"account_acl"`
	ComputeACL []ItemACLModel `tfsdk:"compute_acl"`
	RGACL []ItemACLModel `tfsdk:"rg_acl"`
}

// ItemACLModel is a single access-control entry.
type ItemACLModel struct {
	Explicit types.Bool `tfsdk:"explicit"`
	GUID types.String `tfsdk:"guid"`
	Right types.String `tfsdk:"right"`
	Status types.String `tfsdk:"status"`
	Type types.String `tfsdk:"type"`
	UserGroupID types.String `tfsdk:"user_group_id"`
}

// ItemRuleModel is a single (anti-)affinity rule of a compute.
type ItemRuleModel struct {
	GUID types.String `tfsdk:"guid"`
	Key types.String `tfsdk:"key"`
	Mode types.String `tfsdk:"mode"`
	Policy types.String `tfsdk:"policy"`
	Topology types.String `tfsdk:"topology"`
	Value types.String `tfsdk:"value"`
}

// ItemDiskModel describes one disk attached to the compute.
type ItemDiskModel struct {
	CKey types.String `tfsdk:"ckey"`
	ACL types.String `tfsdk:"acl"`
	AccountID types.Int64 `tfsdk:"account_id"`
	BootPartition types.Int64 `tfsdk:"boot_partition"`
	CreatedTime types.Int64 `tfsdk:"created_time"`
	DeletedTime types.Int64 `tfsdk:"deleted_time"`
	Description types.String `tfsdk:"desc"`
	DestructionTime types.Int64 `tfsdk:"destruction_time"`
	DiskPath types.String `tfsdk:"disk_path"`
	GID types.Int64 `tfsdk:"gid"`
	GUID types.Int64 `tfsdk:"guid"`
	ID types.Int64 `tfsdk:"disk_id"`
	ImageID types.Int64 `tfsdk:"image_id"`
	Images types.List `tfsdk:"images"`
	IOTune *IOTuneModel `tfsdk:"iotune"`
	IQN types.String `tfsdk:"iqn"`
	Login types.String `tfsdk:"login"`
	Milestones types.Int64 `tfsdk:"milestones"`
	Name types.String `tfsdk:"name"`
	Order types.Int64 `tfsdk:"order"`
	Params types.String `tfsdk:"params"`
	ParentID types.Int64 `tfsdk:"parent_id"`
	Passwd types.String `tfsdk:"passwd"`
	PCISlot types.Int64 `tfsdk:"pci_slot"`
	Pool types.String `tfsdk:"pool"`
	PresentTo types.List `tfsdk:"present_to"`
	PurgeTime types.Int64 `tfsdk:"purge_time"`
	ReferenceID types.String `tfsdk:"reference_id"`
	RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
	Replication *ReplicationModel `tfsdk:"replication"`
	ResID types.String `tfsdk:"res_id"`
	Role types.String `tfsdk:"role"`
	SepID types.Int64 `tfsdk:"sep_id"`
	Shareable types.Bool `tfsdk:"shareable"`
	SizeMax types.Int64 `tfsdk:"size_max"`
	SizeUsed types.Float64 `tfsdk:"size_used"`
	Snapshots []ItemSnapshotExtendModel `tfsdk:"snapshots"`
	Status types.String `tfsdk:"status"`
	TechStatus types.String `tfsdk:"tech_status"`
	Type types.String `tfsdk:"type"`
	VMID types.Int64 `tfsdk:"vmid"`
}

// ItemInterfaceModel describes one network interface of the compute.
type ItemInterfaceModel struct {
	ConnID types.Int64 `tfsdk:"conn_id"`
	ConnType types.String `tfsdk:"conn_type"`
	DefGW types.String `tfsdk:"def_gw"`
	Enabled types.Bool `tfsdk:"enabled"`
	FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
	GUID types.String `tfsdk:"guid"`
	IPAddress types.String `tfsdk:"ip_address"`
	ListenSSH types.Bool `tfsdk:"listen_ssh"`
	MAC types.String `tfsdk:"mac"`
	Name types.String `tfsdk:"name"`
	NetID types.Int64 `tfsdk:"net_id"`
	NetMask types.Int64 `tfsdk:"netmask"`
	NetType types.String `tfsdk:"net_type"`
	NodeID types.Int64 `tfsdk:"node_id"`
	PCISlot types.Int64 `tfsdk:"pci_slot"`
	QOS *QOSModel `tfsdk:"qos"`
	Target types.String `tfsdk:"target"`
	Type types.String `tfsdk:"type"`
	VNFs types.List `tfsdk:"vnfs"`
}

// QOSModel holds QoS settings of an interface.
type QOSModel struct {
	ERate types.Int64 `tfsdk:"e_rate"`
	GUID types.String `tfsdk:"guid"`
	InBurst types.Int64 `tfsdk:"in_burst"`
	InRate types.Int64 `tfsdk:"in_rate"`
}

// ItemSnapSetModel describes a snapshot set (a group of disk snapshots
// taken together).
type ItemSnapSetModel struct {
	Disks types.List `tfsdk:"disks"`
	GUID types.String `tfsdk:"guid"`
	Label types.String `tfsdk:"label"`
	Timestamp types.Int64 `tfsdk:"timestamp"`
}

// IOTuneModel holds per-disk I/O tuning limits.
type IOTuneModel struct {
	ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}

// ItemSnapshotExtendModel describes one snapshot of a disk.
type ItemSnapshotExtendModel struct {
	GUID types.String `tfsdk:"guid"`
	Label types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
	TimeStamp types.Int64 `tfsdk:"timestamp"`
}

// ItemOSUserModel holds guest OS user credentials of the compute.
type ItemOSUserModel struct {
	GUID types.String `tfsdk:"guid"`
	Login types.String `tfsdk:"login"`
	Password types.String `tfsdk:"password"`
	PubKey types.String `tfsdk:"public_key"`
}

// ReplicationModel describes disk replication details.
type ReplicationModel struct {
	DiskID types.Int64 `tfsdk:"disk_id"`
	PoolID types.String `tfsdk:"pool_id"`
	Role types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID types.String `tfsdk:"storage_id"`
	VolumeID types.String `tfsdk:"volume_id"`
}

View File

@@ -0,0 +1,23 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListAuditsModel is the data-source model for the audit records of a compute.
type ListAuditsModel struct {
	// request fields
	ComputeID types.Int64 `tfsdk:"compute_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	Items []ItemAuditModel `tfsdk:"items"`
}

// ItemAuditModel is a single audit record: the API call made, its status
// code and timing, and the user who made it.
type ItemAuditModel struct {
	Call types.String `tfsdk:"call"`
	ResponseTime types.Float64 `tfsdk:"responsetime"`
	StatusCode types.Int64 `tfsdk:"statuscode"`
	Timestamp types.Float64 `tfsdk:"timestamp"`
	User types.String `tfsdk:"user"`
}

View File

@@ -0,0 +1,20 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// GetAuditsModel is the data-source model for the short audit log of a compute.
type GetAuditsModel struct {
	// request fields
	ComputeID types.Int64 `tfsdk:"compute_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	Items []ItemShortAuditModel `tfsdk:"items"`
}

// ItemShortAuditModel is a single short audit entry: a timestamped message.
type ItemShortAuditModel struct {
	Epoch types.Float64 `tfsdk:"epoch"`
	Message types.String `tfsdk:"message"`
}

View File

@@ -0,0 +1,15 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// GetConsoleUrlModel is the data-source model for the console URL of a compute.
type GetConsoleUrlModel struct {
	// request fields
	ComputeID types.Int64 `tfsdk:"compute_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	ConsoleURL types.String `tfsdk:"console_url"`
}

View File

@@ -0,0 +1,16 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// GetLogModel is the data-source model for reading a log file of a compute
// at the given path.
type GetLogModel struct {
	// request fields
	ComputeID types.Int64 `tfsdk:"compute_id"`
	Path types.String `tfsdk:"path"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	Log types.String `tfsdk:"log"`
}

View File

@@ -0,0 +1,153 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListComputesModel is the data-source model for listing computes with
// optional filters and pagination.
type ListComputesModel struct {
	// request fields
	ByID types.Int64 `tfsdk:"by_id"`
	Name types.String `tfsdk:"name"`
	AccountID types.Int64 `tfsdk:"account_id"`
	RGName types.String `tfsdk:"rg_name"`
	RGID types.Int64 `tfsdk:"rg_id"`
	TechStatus types.String `tfsdk:"tech_status"`
	Status types.String `tfsdk:"status"`
	IPAddress types.String `tfsdk:"ip_address"`
	ExtNetName types.String `tfsdk:"extnet_name"`
	ExtNetID types.Int64 `tfsdk:"extnet_id"`
	IncludeDeleted types.Bool `tfsdk:"includedeleted"`
	SortBy types.String `tfsdk:"sort_by"`
	Page types.Int64 `tfsdk:"page"`
	Size types.Int64 `tfsdk:"size"`
	IgnoreK8s types.Bool `tfsdk:"ignore_k8s"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id types.String `tfsdk:"id"`
	Items []ItemComputeModel `tfsdk:"items"`
	EntryCount types.Int64 `tfsdk:"entry_count"`
}

// ItemComputeModel is one compute entry in the list response.
type ItemComputeModel struct {
	ACL []ItemACLInListModel `tfsdk:"acl"`
	AccountID types.Int64 `tfsdk:"account_id"`
	AccountName types.String `tfsdk:"account_name"`
	AffinityLabel types.String `tfsdk:"affinity_label"`
	AffinityRules []ItemRuleInListModel `tfsdk:"affinity_rules"`
	AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
	AntiAffinityRules []ItemRuleInListModel `tfsdk:"anti_affinity_rules"`
	Architecture types.String `tfsdk:"arch"`
	BootOrder types.List `tfsdk:"boot_order"`
	BootDiskSize types.Int64 `tfsdk:"bootdisk_size"`
	CdImageId types.Int64 `tfsdk:"cd_image_id"`
	CloneReference types.Int64 `tfsdk:"clone_reference"`
	Clones types.List `tfsdk:"clones"`
	ComputeCIID types.Int64 `tfsdk:"computeci_id"`
	CPU types.Int64 `tfsdk:"cpus"`
	CPUPin types.Bool `tfsdk:"cpu_pin"`
	CreatedBy types.String `tfsdk:"created_by"`
	CreatedTime types.Int64 `tfsdk:"created_time"`
	CustomFields types.String `tfsdk:"custom_fields"`
	DeletedBy types.String `tfsdk:"deleted_by"`
	DeletedTime types.Int64 `tfsdk:"deleted_time"`
	Description types.String `tfsdk:"desc"`
	Devices types.String `tfsdk:"devices"`
	Disks []DiskInListModel `tfsdk:"disks"`
	Driver types.String `tfsdk:"driver"`
	GID types.Int64 `tfsdk:"gid"`
	GUID types.Int64 `tfsdk:"guid"`
	HPBacked types.Bool `tfsdk:"hp_backed"`
	ComputeId types.Int64 `tfsdk:"compute_id"`
	ImageID types.Int64 `tfsdk:"image_id"`
	Interfaces []ItemVNFInterfaceInListModel `tfsdk:"interfaces"`
	LockStatus types.String `tfsdk:"lock_status"`
	ManagerID types.Int64 `tfsdk:"manager_id"`
	ManagerType types.String `tfsdk:"manager_type"`
	MigrationJob types.Int64 `tfsdk:"migrationjob"`
	Milestones types.Int64 `tfsdk:"milestones"`
	Name types.String `tfsdk:"name"`
	NeedReboot types.Bool `tfsdk:"need_reboot"`
	NumaAffinity types.String `tfsdk:"numa_affinity"`
	NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
	Pinned types.Bool `tfsdk:"pinned"`
	RAM types.Int64 `tfsdk:"ram"`
	ReferenceID types.String `tfsdk:"reference_id"`
	Registered types.Bool `tfsdk:"registered"`
	ResName types.String `tfsdk:"res_name"`
	ReservedNodeCpus types.List `tfsdk:"reserved_node_cpus"`
	RGID types.Int64 `tfsdk:"rg_id"`
	RGName types.String `tfsdk:"rg_name"`
	SnapSets []ItemSnapSetInListModel `tfsdk:"snap_sets"`
	StatelessSepID types.Int64 `tfsdk:"stateless_sep_id"`
	StatelessSepType types.String `tfsdk:"stateless_sep_type"`
	Status types.String `tfsdk:"status"`
	Tags types.Map `tfsdk:"tags"`
	TechStatus types.String `tfsdk:"tech_status"`
	TotalDiskSize types.Int64 `tfsdk:"total_disks_size"`
	UpdatedBy types.String `tfsdk:"updated_by"`
	UpdatedTime types.Int64 `tfsdk:"updated_time"`
	UserManaged types.Bool `tfsdk:"user_managed"`
	VGPUs types.List `tfsdk:"vgpus"`
	VINSConnected types.Int64 `tfsdk:"vins_connected"`
	VirtualImageID types.Int64 `tfsdk:"virtual_image_id"`
}

// ItemACLInListModel is a single access-control entry of a listed compute.
type ItemACLInListModel struct {
	Explicit types.Bool `tfsdk:"explicit"`
	GUID types.String `tfsdk:"guid"`
	Right types.String `tfsdk:"right"`
	Status types.String `tfsdk:"status"`
	Type types.String `tfsdk:"type"`
	UserGroupID types.String `tfsdk:"user_group_id"`
}

// ItemRuleInListModel is a single (anti-)affinity rule of a listed compute.
type ItemRuleInListModel struct {
	GUID types.String `tfsdk:"guid"`
	Key types.String `tfsdk:"key"`
	Mode types.String `tfsdk:"mode"`
	Policy types.String `tfsdk:"policy"`
	Topology types.String `tfsdk:"topology"`
	Value types.String `tfsdk:"value"`
}

// DiskInListModel is the short disk record used in list responses.
type DiskInListModel struct {
	DiskId types.Int64 `tfsdk:"disk_id"`
	PCISlot types.Int64 `tfsdk:"pci_slot"`
}

// ItemVNFInterfaceInListModel describes one network interface of a listed compute.
type ItemVNFInterfaceInListModel struct {
	ConnID types.Int64 `tfsdk:"conn_id"`
	ConnType types.String `tfsdk:"conn_type"`
	DefGW types.String `tfsdk:"def_gw"`
	Enabled types.Bool `tfsdk:"enabled"`
	FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
	GUID types.String `tfsdk:"guid"`
	IPAddress types.String `tfsdk:"ip_address"`
	ListenSSH types.Bool `tfsdk:"listen_ssh"`
	MAC types.String `tfsdk:"mac"`
	Name types.String `tfsdk:"name"`
	NetID types.Int64 `tfsdk:"net_id"`
	NetMask types.Int64 `tfsdk:"netmask"`
	NetType types.String `tfsdk:"net_type"`
	NodeID types.Int64 `tfsdk:"node_id"`
	PCISlot types.Int64 `tfsdk:"pci_slot"`
	QOS *QOSInListModel `tfsdk:"qos"`
	Target types.String `tfsdk:"target"`
	Type types.String `tfsdk:"type"`
	VNFs types.List `tfsdk:"vnfs"`
}

// QOSInListModel holds QoS settings of an interface in list responses.
type QOSInListModel struct {
	ERate types.Int64 `tfsdk:"e_rate"`
	GUID types.String `tfsdk:"guid"`
	InBurst types.Int64 `tfsdk:"in_burst"`
	InRate types.Int64 `tfsdk:"in_rate"`
}

// ItemSnapSetInListModel describes a snapshot set of a listed compute.
type ItemSnapSetInListModel struct {
	Disks types.List `tfsdk:"disks"`
	GUID types.String `tfsdk:"guid"`
	Label types.String `tfsdk:"label"`
	Timestamp types.Int64 `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,151 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ListDeletedComputesModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountID types.Int64 `tfsdk:"account_id"`
RGName types.String `tfsdk:"rg_name"`
RGID types.Int64 `tfsdk:"rg_id"`
TechStatus types.String `tfsdk:"tech_status"`
IPAddress types.String `tfsdk:"ip_address"`
ExtNetName types.String `tfsdk:"extnet_name"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
IgnoreK8s types.Bool `tfsdk:"ignore_k8s"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemListDeletedComputeModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemListDeletedComputeModel struct {
ACL []ItemACLInListDeletedModel `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
AffinityLabel types.String `tfsdk:"affinity_label"`
AffinityRules []ItemRuleInListDeletedModel `tfsdk:"affinity_rules"`
AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
AntiAffinityRules []ItemRuleInListDeletedModel `tfsdk:"anti_affinity_rules"`
Architecture types.String `tfsdk:"arch"`
BootOrder types.List `tfsdk:"boot_order"`
BootDiskSize types.Int64 `tfsdk:"bootdisk_size"`
CdImageId types.Int64 `tfsdk:"cd_image_id"`
CloneReference types.Int64 `tfsdk:"clone_reference"`
Clones types.List `tfsdk:"clones"`
ComputeCIID types.Int64 `tfsdk:"computeci_id"`
CPU types.Int64 `tfsdk:"cpus"`
CPUPin types.Bool `tfsdk:"cpu_pin"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
CustomFields types.String `tfsdk:"custom_fields"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
Devices types.String `tfsdk:"devices"`
Disks []DiskInListDeletedModel `tfsdk:"disks"`
Driver types.String `tfsdk:"driver"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
HPBacked types.Bool `tfsdk:"hp_backed"`
ComputeId types.Int64 `tfsdk:"compute_id"`
ImageID types.Int64 `tfsdk:"image_id"`
Interfaces []ItemVNFInterfaceInListDeletedModel `tfsdk:"interfaces"`
LockStatus types.String `tfsdk:"lock_status"`
ManagerID types.Int64 `tfsdk:"manager_id"`
ManagerType types.String `tfsdk:"manager_type"`
MigrationJob types.Int64 `tfsdk:"migrationjob"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
NeedReboot types.Bool `tfsdk:"need_reboot"`
NumaAffinity types.String `tfsdk:"numa_affinity"`
NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
Pinned types.Bool `tfsdk:"pinned"`
RAM types.Int64 `tfsdk:"ram"`
ReferenceID types.String `tfsdk:"reference_id"`
Registered types.Bool `tfsdk:"registered"`
ResName types.String `tfsdk:"res_name"`
ReservedNodeCpus types.List `tfsdk:"reserved_node_cpus"`
RGID types.Int64 `tfsdk:"rg_id"`
RGName types.String `tfsdk:"rg_name"`
SnapSets []ItemSnapSetInListDeletedModel `tfsdk:"snap_sets"`
StatelessSepID types.Int64 `tfsdk:"stateless_sep_id"`
StatelessSepType types.String `tfsdk:"stateless_sep_type"`
Status types.String `tfsdk:"status"`
Tags types.Map `tfsdk:"tags"`
TechStatus types.String `tfsdk:"tech_status"`
TotalDiskSize types.Int64 `tfsdk:"total_disks_size"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
UserManaged types.Bool `tfsdk:"user_managed"`
VGPUs types.List `tfsdk:"vgpus"`
VINSConnected types.Int64 `tfsdk:"vins_connected"`
VirtualImageID types.Int64 `tfsdk:"virtual_image_id"`
}
// ItemACLInListDeletedModel is the Terraform data model for a single ACL entry
// of a (deleted) compute; field names map to schema attributes via tfsdk tags.
type ItemACLInListDeletedModel struct {
	Explicit    types.Bool   `tfsdk:"explicit"`
	GUID        types.String `tfsdk:"guid"`
	Right       types.String `tfsdk:"right"`
	Status      types.String `tfsdk:"status"`
	Type        types.String `tfsdk:"type"`
	UserGroupID types.String `tfsdk:"user_group_id"`
}
// ItemRuleInListDeletedModel is the Terraform data model for one (anti-)affinity
// rule attached to a compute in the deleted-computes list.
type ItemRuleInListDeletedModel struct {
	GUID     types.String `tfsdk:"guid"`
	Key      types.String `tfsdk:"key"`
	Mode     types.String `tfsdk:"mode"`
	Policy   types.String `tfsdk:"policy"`
	Topology types.String `tfsdk:"topology"`
	Value    types.String `tfsdk:"value"`
}
// DiskInListDeletedModel is the Terraform data model for one disk attached to a
// compute in the deleted-computes list (ID plus its PCI slot).
type DiskInListDeletedModel struct {
	DiskId  types.Int64 `tfsdk:"disk_id"`
	PCISlot types.Int64 `tfsdk:"pci_slot"`
}
// ItemVNFInterfaceInListDeletedModel is the Terraform data model for one VNF
// network interface of a compute in the deleted-computes list.
type ItemVNFInterfaceInListDeletedModel struct {
	ConnID      types.Int64  `tfsdk:"conn_id"`
	ConnType    types.String `tfsdk:"conn_type"`
	DefGW       types.String `tfsdk:"def_gw"`
	Enabled     types.Bool   `tfsdk:"enabled"`
	FLIPGroupID types.Int64  `tfsdk:"flip_group_id"`
	GUID        types.String `tfsdk:"guid"`
	IPAddress   types.String `tfsdk:"ip_address"`
	ListenSSH   types.Bool   `tfsdk:"listen_ssh"`
	MAC         types.String `tfsdk:"mac"`
	Name        types.String `tfsdk:"name"`
	NetID       types.Int64  `tfsdk:"net_id"`
	NetMask     types.Int64  `tfsdk:"netmask"`
	NetType     types.String `tfsdk:"net_type"`
	NodeID      types.Int64  `tfsdk:"node_id"`
	PCISlot     types.Int64  `tfsdk:"pci_slot"`
	// NOTE(review): references QOSInListModel rather than QOSInListDeletedModel
	// declared just below — confirm which QOS model the flattener populates here;
	// QOSInListDeletedModel appears otherwise unused in this file.
	QOS    *QOSInListModel `tfsdk:"qos"`
	Target types.String    `tfsdk:"target"`
	Type   types.String    `tfsdk:"type"`
	VNFs   types.List      `tfsdk:"vnfs"`
}
// QOSInListDeletedModel is the Terraform data model for interface QoS settings
// (egress rate plus ingress rate/burst) of a deleted compute.
// NOTE(review): not referenced by any type visible in this file —
// ItemVNFInterfaceInListDeletedModel uses *QOSInListModel instead; verify.
type QOSInListDeletedModel struct {
	ERate   types.Int64  `tfsdk:"e_rate"`
	GUID    types.String `tfsdk:"guid"`
	InBurst types.Int64  `tfsdk:"in_burst"`
	InRate  types.Int64  `tfsdk:"in_rate"`
}
// ItemSnapSetInListDeletedModel is the Terraform data model for one snapshot
// set (label, timestamp and the disks it covers) of a deleted compute.
type ItemSnapSetInListDeletedModel struct {
	Disks     types.List   `tfsdk:"disks"`
	GUID      types.String `tfsdk:"guid"`
	Label     types.String `tfsdk:"label"`
	Timestamp types.Int64  `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,36 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListPCIDevicesModel is the Terraform data-source model for listing PCI
// devices. Request fields are the user-supplied filters/paging; response
// fields are computed from the API reply.
type ListPCIDevicesModel struct {
	// request fields (filters and paging)
	ComputeID types.Int64    `tfsdk:"compute_id"`
	RGID      types.Int64    `tfsdk:"rg_id"`
	DevID     types.Int64    `tfsdk:"device_id"`
	Name      types.String   `tfsdk:"name"`
	Status    types.String   `tfsdk:"status"`
	SortBy    types.String   `tfsdk:"sort_by"`
	Page      types.Int64    `tfsdk:"page"`
	Size      types.Int64    `tfsdk:"size"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`
	// response fields (computed)
	Id         types.String    `tfsdk:"id"`
	Items      []ItemPCIDevice `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}
// ItemPCIDevice is the Terraform data model for a single PCI device entry
// returned by the PCI device list.
type ItemPCIDevice struct {
	ComputeID   types.Int64  `tfsdk:"compute_id"`
	Description types.String `tfsdk:"desc"`
	GUID        types.Int64  `tfsdk:"guid"`
	HwPath      types.String `tfsdk:"hwpath"`
	ID          types.Int64  `tfsdk:"device_id"`
	Name        types.String `tfsdk:"name"`
	RGID        types.Int64  `tfsdk:"rg_id"`
	StackID     types.Int64  `tfsdk:"stack_id"`
	Status      types.String `tfsdk:"status"`
	SystemName  types.String `tfsdk:"system_name"`
}

View File

@@ -0,0 +1,26 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListPFWsModel is the Terraform data-source model for listing port-forwarding
// rules of a compute.
type ListPFWsModel struct {
	// request fields
	ComputeID types.Int64    `tfsdk:"compute_id"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`
	// response fields (computed)
	Id         types.String   `tfsdk:"id"`
	Items      []ItemPFWModel `tfsdk:"items"`
	EntryCount types.Int64    `tfsdk:"entry_count"`
}
// ItemPFWModel is the Terraform data model for one port-forwarding rule
// (public port range mapped to a local IP/port for a VM).
type ItemPFWModel struct {
	PFWId           types.Int64  `tfsdk:"pfw_id"`
	LocalIP         types.String `tfsdk:"local_ip"`
	LocalPort       types.Int64  `tfsdk:"local_port"`
	Protocol        types.String `tfsdk:"protocol"`
	PublicPortEnd   types.Int64  `tfsdk:"public_port_end"`
	PublicPortStart types.Int64  `tfsdk:"public_port_start"`
	VMID            types.Int64  `tfsdk:"vm_id"`
}

View File

@@ -0,0 +1,23 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListSnapShotsModel is the Terraform data-source model for listing snapshot
// usage of a compute, optionally filtered by label.
type ListSnapShotsModel struct {
	// request fields
	ComputeID types.Int64    `tfsdk:"compute_id"`
	Label     types.String   `tfsdk:"label"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`
	// response fields (computed)
	Id    types.String             `tfsdk:"id"`
	Items []ItemUsageSnapshotModel `tfsdk:"items"`
}
// ItemUsageSnapshotModel is the Terraform data model for one snapshot usage
// entry (count and stored size per label/timestamp).
type ItemUsageSnapshotModel struct {
	Count     types.Int64   `tfsdk:"count"`
	Stored    types.Float64 `tfsdk:"stored"`
	Label     types.String  `tfsdk:"label"`
	Timestamp types.Int64   `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,31 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListUsersModel is the Terraform data-source model for listing users (ACLs)
// of a compute. Items holds the account/compute/rg ACL groups.
type ListUsersModel struct {
	// request fields
	ComputeID types.Int64    `tfsdk:"compute_id"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`
	// response fields (computed)
	Id         types.String               `tfsdk:"id"`
	Items      *RecordACLInListUsersModel `tfsdk:"items"`
	EntryCount types.Int64                `tfsdk:"entry_count"`
}
// RecordACLInListUsersModel groups the ACL entries of a compute by scope:
// account-level, compute-level and resource-group-level.
type RecordACLInListUsersModel struct {
	AccountACL []ItemACLInListUsersModel `tfsdk:"account_acl"`
	ComputeACL []ItemACLInListUsersModel `tfsdk:"compute_acl"`
	RGACL      []ItemACLInListUsersModel `tfsdk:"rg_acl"`
}
// ItemACLInListUsersModel is the Terraform data model for one ACL entry in the
// compute users list.
type ItemACLInListUsersModel struct {
	Explicit    types.Bool   `tfsdk:"explicit"`
	GUID        types.String `tfsdk:"guid"`
	Right       types.String `tfsdk:"right"`
	Status      types.String `tfsdk:"status"`
	Type        types.String `tfsdk:"type"`
	UserGroupID types.String `tfsdk:"user_group_id"`
}

View File

@@ -0,0 +1,44 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ListVGPUsModel is the Terraform data-source model for listing vGPUs attached
// to a compute, with the usual filter/sort/paging request fields.
type ListVGPUsModel struct {
	// request fields (filters and paging)
	ComputeID      types.Int64    `tfsdk:"compute_id"`
	GPUID          types.Int64    `tfsdk:"gpu_id"`
	Type           types.String   `tfsdk:"type"`
	Status         types.String   `tfsdk:"status"`
	SortBy         types.String   `tfsdk:"sort_by"`
	Page           types.Int64    `tfsdk:"page"`
	Size           types.Int64    `tfsdk:"size"`
	IncludeDeleted types.Bool     `tfsdk:"includedeleted"`
	Timeouts       timeouts.Value `tfsdk:"timeouts"`
	// response fields (computed)
	Id         types.String `tfsdk:"id"`
	Items      []ItemVGPU   `tfsdk:"items"`
	EntryCount types.Int64  `tfsdk:"entry_count"`
}
// ItemVGPU is the Terraform data model for one vGPU entry returned by the
// vGPU list.
type ItemVGPU struct {
	AccountID      types.Int64  `tfsdk:"account_id"`
	CreatedTime    types.Int64  `tfsdk:"created_time"`
	DeletedTime    types.Int64  `tfsdk:"deleted_time"`
	GID            types.Int64  `tfsdk:"gid"`
	GUID           types.Int64  `tfsdk:"guid"`
	ID             types.Int64  `tfsdk:"vgpu_id"`
	LastClaimedBy  types.Int64  `tfsdk:"last_claimed_by"`
	LastUpdateTime types.Int64  `tfsdk:"last_update_time"`
	Mode           types.String `tfsdk:"mode"`
	PCISlot        types.Int64  `tfsdk:"pci_slot"`
	PGPUID         types.Int64  `tfsdk:"pgpuid"`
	ProfileID      types.Int64  `tfsdk:"profile_id"`
	RAM            types.Int64  `tfsdk:"ram"`
	ReferenceID    types.String `tfsdk:"reference_id"`
	RGID           types.Int64  `tfsdk:"rg_id"`
	Status         types.String `tfsdk:"status"`
	Type           types.String `tfsdk:"type"`
	VMID           types.Int64  `tfsdk:"vm_id"`
}

View File

@@ -0,0 +1,336 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
disks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// ResourceComputeModel is the Terraform resource model for a Compute (VM).
// Required fields must be supplied in configuration; optional fields tune
// creation/update behavior; response fields are computed from the API reply.
// Field names map to schema attributes via tfsdk tags.
type ResourceComputeModel struct {
	// required fields
	Name   types.String `tfsdk:"name"`
	RGID   types.Int64  `tfsdk:"rg_id"`
	Driver types.String `tfsdk:"driver"`
	CPU    types.Int64  `tfsdk:"cpu"`
	RAM    types.Int64  `tfsdk:"ram"`
	// optional fields (creation/update knobs and action flags such as
	// enabled/started/pause/reset, which trigger compute operations)
	ImageID           types.Int64  `tfsdk:"image_id"`
	WithoutBootDisk   types.Bool   `tfsdk:"without_boot_disk"`
	BootDiskSize      types.Int64  `tfsdk:"boot_disk_size"`
	AffinityLabel     types.String `tfsdk:"affinity_label"`
	AffinityRules     types.Set    `tfsdk:"affinity_rules"`
	AntiAffinityRules types.Set    `tfsdk:"anti_affinity_rules"`
	CustomFields      types.String `tfsdk:"custom_fields"`
	Stateless         types.Bool   `tfsdk:"stateless"`
	SepId             types.Int64  `tfsdk:"sep_id"`
	Pool              types.String `tfsdk:"pool"`
	ExtraDisks        types.Set    `tfsdk:"extra_disks"`
	Network           types.Set    `tfsdk:"network"`
	Tags              types.Set    `tfsdk:"tags"`
	PortForwarding    types.Set    `tfsdk:"port_forwarding"`
	UserAccess        types.Set    `tfsdk:"user_access"`
	Snapshot          types.Set    `tfsdk:"snapshot"`
	Rollback          types.Object `tfsdk:"rollback"`
	CD                types.Object `tfsdk:"cd"`
	PinToStack        types.Bool   `tfsdk:"pin_to_stack"`
	Description       types.String `tfsdk:"description"`
	CloudInit         types.String `tfsdk:"cloud_init"`
	Enabled           types.Bool   `tfsdk:"enabled"`
	Pause             types.Bool   `tfsdk:"pause"`
	Reset             types.Bool   `tfsdk:"reset"`
	Restore           types.Bool   `tfsdk:"restore"`
	AutoStart         types.Bool   `tfsdk:"auto_start"`
	ForceStop         types.Bool   `tfsdk:"force_stop"`
	ForceResize       types.Bool   `tfsdk:"force_resize"`
	DataDisks         types.String `tfsdk:"data_disks"`
	Started           types.Bool   `tfsdk:"started"`
	DetachDisks       types.Bool   `tfsdk:"detach_disks"`
	Permanently       types.Bool   `tfsdk:"permanently"`
	IS                types.String `tfsdk:"is"`
	IpaType           types.String `tfsdk:"ipa_type"`
	NumaAffinity      types.String `tfsdk:"numa_affinity"`
	CPUPin            types.Bool   `tfsdk:"cpu_pin"`
	HPBacked          types.Bool   `tfsdk:"hp_backed"`
	// response fields (computed)
	ID                     types.String   `tfsdk:"id"`
	AccountId              types.Int64    `tfsdk:"account_id"`
	AccountName            types.String   `tfsdk:"account_name"`
	ACL                    types.Object   `tfsdk:"acl"` //k8s
	AffinityWeight         types.Int64    `tfsdk:"affinity_weight"`
	Architecture           types.String   `tfsdk:"arch"`
	BootOrder              types.List     `tfsdk:"boot_order"`
	BootDisk               types.Object   `tfsdk:"boot_disk"`
	BootDiskId             types.Int64    `tfsdk:"boot_disk_id"`
	CdImageId              types.Int64    `tfsdk:"cd_image_id"`
	CloneReference         types.Int64    `tfsdk:"clone_reference"`
	Clones                 types.List     `tfsdk:"clones"`
	ComputeCIID            types.Int64    `tfsdk:"computeci_id"`
	ComputeId              types.Int64    `tfsdk:"compute_id"`
	CreatedBy              types.String   `tfsdk:"created_by"`
	CreatedTime            types.Int64    `tfsdk:"created_time"`
	DeletedBy              types.String   `tfsdk:"deleted_by"`
	DeletedTime            types.Int64    `tfsdk:"deleted_time"`
	Devices                types.String   `tfsdk:"devices"`
	Disks                  types.List     `tfsdk:"disks"`
	GID                    types.Int64    `tfsdk:"gid"`
	GUID                   types.Int64    `tfsdk:"guid"`
	ImageName              types.String   `tfsdk:"image_name"`
	Interfaces             types.List     `tfsdk:"interfaces"`
	LockStatus             types.String   `tfsdk:"lock_status"`
	ManagerID              types.Int64    `tfsdk:"manager_id"`
	ManagerType            types.String   `tfsdk:"manager_type"`
	MigrationJob           types.Int64    `tfsdk:"migrationjob"`
	Milestones             types.Int64    `tfsdk:"milestones"`
	NatableVINSID          types.Int64    `tfsdk:"natable_vins_id"`
	NatableVINSIP          types.String   `tfsdk:"natable_vins_ip"`
	NatableVINSName        types.String   `tfsdk:"natable_vins_name"`
	NatableVINSNetwork     types.String   `tfsdk:"natable_vins_network"`
	NatableVINSNetworkName types.String   `tfsdk:"natable_vins_network_name"`
	NeedReboot             types.Bool     `tfsdk:"need_reboot"`
	NumaNodeId             types.Int64    `tfsdk:"numa_node_id"`
	OSUsers                types.List     `tfsdk:"os_users"`
	Pinned                 types.Bool     `tfsdk:"pinned"`
	ReferenceID            types.String   `tfsdk:"reference_id"`
	Registered             types.Bool     `tfsdk:"registered"`
	ResName                types.String   `tfsdk:"res_name"`
	ReservedNodeCpus       types.List     `tfsdk:"reserved_node_cpus"`
	RGName                 types.String   `tfsdk:"rg_name"`
	SnapSets               types.List     `tfsdk:"snap_sets"`
	StatelessSepID         types.Int64    `tfsdk:"stateless_sep_id"`
	StatelessSepType       types.String   `tfsdk:"stateless_sep_type"`
	Status                 types.String   `tfsdk:"status"`
	TechStatus             types.String   `tfsdk:"tech_status"`
	Timeouts               timeouts.Value `tfsdk:"timeouts"`
	UpdatedBy              types.String   `tfsdk:"updated_by"`
	UpdatedTime            types.Int64    `tfsdk:"updated_time"`
	UserManaged            types.Bool     `tfsdk:"user_managed"`
	Userdata               types.String   `tfsdk:"user_data"`
	VGPUs                  types.List     `tfsdk:"vgpus"`
	VirtualImageID         types.Int64    `tfsdk:"virtual_image_id"`
	VirtualImageName       types.String   `tfsdk:"virtual_image_name"`
}
// RecordResourceACLModel groups ACL lists of a compute resource by scope
// (account / compute / resource group); element type is ItemACL.
type RecordResourceACLModel struct {
	AccountACL types.List `tfsdk:"account_acl"`
	ComputeACL types.List `tfsdk:"compute_acl"`
	RGACL      types.List `tfsdk:"rg_acl"`
}
// ItemResourceDiskModel is the Terraform data model for one disk of a compute
// resource; attr-type counterpart is the ItemDisk map below, which must stay
// in sync with these tfsdk tags.
type ItemResourceDiskModel struct {
	CKey                types.String  `tfsdk:"ckey"`
	ACL                 types.String  `tfsdk:"acl"`
	AccountID           types.Int64   `tfsdk:"account_id"`
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	Description         types.String  `tfsdk:"desc"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	GID                 types.Int64   `tfsdk:"gid"`
	GUID                types.Int64   `tfsdk:"guid"`
	ID                  types.Int64   `tfsdk:"disk_id"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	Images              types.List    `tfsdk:"images"`
	IOTune              types.Object  `tfsdk:"iotune"`
	IQN                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	Name                types.String  `tfsdk:"name"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	Pool                types.String  `tfsdk:"pool"`
	PresentTo           types.List    `tfsdk:"present_to"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	Replication         types.Object  `tfsdk:"replication"`
	ResID               types.String  `tfsdk:"res_id"`
	Role                types.String  `tfsdk:"role"`
	SepID               types.Int64   `tfsdk:"sep_id"`
	Shareable           types.Bool    `tfsdk:"shareable"`
	SizeMax             types.Int64   `tfsdk:"size_max"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	Type                types.String  `tfsdk:"type"`
	VMID                types.Int64   `tfsdk:"vmid"`
}
// ItemPortForfardingModel is the Terraform data model for one port-forwarding
// rule in the compute resource's port_forwarding set.
// NOTE(review): type name is misspelled ("Forfarding" -> "Forwarding");
// renaming would touch every caller, so it is only flagged here.
type ItemPortForfardingModel struct {
	PublicPortStart types.Int64  `tfsdk:"public_port_start"`
	PublicPortEnd   types.Int64  `tfsdk:"public_port_end"`
	LocalPort       types.Int64  `tfsdk:"local_port"`
	Proto           types.String `tfsdk:"proto"`
}
// ItemNetworkModel is the Terraform data model for one entry of the compute
// resource's network set; attr-type counterpart is the ItemNetwork map below.
type ItemNetworkModel struct {
	NetType   types.String `tfsdk:"net_type"`
	NetId     types.Int64  `tfsdk:"net_id"`
	IpAddress types.String `tfsdk:"ip_address"`
	Mac       types.String `tfsdk:"mac"`
}
// ItemResourceInterfacesModel is the Terraform data model for one network
// interface of the compute resource; attr-type counterpart is the
// ItemInterfaces map below.
type ItemResourceInterfacesModel struct {
	ConnID   types.Int64  `tfsdk:"conn_id"`
	ConnType types.String `tfsdk:"conn_type"`
	// NOTE(review): tag is "get_gw" here while the data-source interface model
	// uses "def_gw" for the default gateway — confirm the intended attribute name.
	GetGW       types.String `tfsdk:"get_gw"`
	Enabled     types.Bool   `tfsdk:"enabled"`
	FLIPGroupID types.Int64  `tfsdk:"flip_group_id"`
	GUID        types.String `tfsdk:"guid"`
	IPAddress   types.String `tfsdk:"ip_address"`
	ListenSSH   types.Bool   `tfsdk:"listen_ssh"`
	MAC         types.String `tfsdk:"mac"`
	Name        types.String `tfsdk:"name"`
	NetID       types.Int64  `tfsdk:"net_id"`
	NetMask     types.Int64  `tfsdk:"netmask"`
	NetType     types.String `tfsdk:"net_type"`
	NodeID      types.Int64  `tfsdk:"node_id"`
	PCISlot     types.Int64  `tfsdk:"pci_slot"`
	QOS         types.Object `tfsdk:"qos"`
	Target      types.String `tfsdk:"target"`
	Type        types.String `tfsdk:"type"`
	VNFs        types.List   `tfsdk:"vnfs"`
}
// ItemResourceRulesModel is the Terraform data model for one (anti-)affinity
// rule in the compute resource's affinity_rules / anti_affinity_rules sets.
type ItemResourceRulesModel struct {
	Key      types.String `tfsdk:"key"`
	Mode     types.String `tfsdk:"mode"`
	Policy   types.String `tfsdk:"policy"`
	Topology types.String `tfsdk:"topology"`
	Value    types.String `tfsdk:"value"`
}
// ItemNetwork is the attr.Type registry for ItemNetworkModel; keys must match
// that struct's tfsdk tags exactly.
var ItemNetwork = map[string]attr.Type{
	"net_type":   types.StringType,
	"net_id":     types.Int64Type,
	"ip_address": types.StringType,
	"mac":        types.StringType,
}
// ItemDisk is the attr.Type registry for ItemResourceDiskModel; keys must
// match that struct's tfsdk tags exactly. Nested object attr types come from
// the disks package (iotune) and from ItemReplication/ItemSnapshot below.
var ItemDisk = map[string]attr.Type{
	"ckey":                  types.StringType,
	"acl":                   types.StringType,
	"account_id":            types.Int64Type,
	"boot_partition":        types.Int64Type,
	"created_time":          types.Int64Type,
	"deleted_time":          types.Int64Type,
	"desc":                  types.StringType,
	"destruction_time":      types.Int64Type,
	"disk_path":             types.StringType,
	"gid":                   types.Int64Type,
	"guid":                  types.Int64Type,
	"disk_id":               types.Int64Type,
	"image_id":              types.Int64Type,
	"images":                types.ListType{ElemType: types.StringType},
	"iotune":                types.ObjectType{AttrTypes: disks.ItemIOTune},
	"iqn":                   types.StringType,
	"login":                 types.StringType,
	"milestones":            types.Int64Type,
	"name":                  types.StringType,
	"order":                 types.Int64Type,
	"params":                types.StringType,
	"parent_id":             types.Int64Type,
	"passwd":                types.StringType,
	"pci_slot":              types.Int64Type,
	"pool":                  types.StringType,
	"present_to":            types.ListType{ElemType: types.Int64Type},
	"purge_time":            types.Int64Type,
	"replication":           types.ObjectType{AttrTypes: ItemReplication},
	"reality_device_number": types.Int64Type,
	"reference_id":          types.StringType,
	"res_id":                types.StringType,
	"role":                  types.StringType,
	"sep_id":                types.Int64Type,
	"shareable":             types.BoolType,
	"size_max":              types.Int64Type,
	"size_used":             types.Float64Type,
	"snapshots":             types.ListType{ElemType: types.ObjectType{AttrTypes: ItemSnapshot}},
	"status":                types.StringType,
	"tech_status":           types.StringType,
	"type":                  types.StringType,
	"vmid":                  types.Int64Type,
}
// ItemReplication is the attr.Type registry for a disk's replication object.
var ItemReplication = map[string]attr.Type{
	"disk_id":        types.Int64Type,
	"pool_id":        types.StringType,
	"role":           types.StringType,
	"self_volume_id": types.StringType,
	"storage_id":     types.StringType,
	"volume_id":      types.StringType,
}
// ItemSnapshot is the attr.Type registry for one disk snapshot object.
var ItemSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"reference_id":  types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}
// ListACL is the attr.Type registry for RecordResourceACLModel: three ACL
// lists (account / compute / rg), each with ItemACL elements.
var ListACL = map[string]attr.Type{
	"account_acl": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemACL}},
	"compute_acl": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemACL}},
	"rg_acl":      types.ListType{ElemType: types.ObjectType{AttrTypes: ItemACL}},
}
// ItemACL is the attr.Type registry for a single ACL entry object.
var ItemACL = map[string]attr.Type{
	"explicit":      types.BoolType,
	"guid":          types.StringType,
	"right":         types.StringType,
	"status":        types.StringType,
	"type":          types.StringType,
	"user_group_id": types.StringType,
}
// ItemInterfaces is the attr.Type registry for ItemResourceInterfacesModel;
// keys must match that struct's tfsdk tags exactly (note "get_gw").
var ItemInterfaces = map[string]attr.Type{
	"conn_id":       types.Int64Type,
	"conn_type":     types.StringType,
	"get_gw":        types.StringType,
	"enabled":       types.BoolType,
	"flip_group_id": types.Int64Type,
	"guid":          types.StringType,
	"ip_address":    types.StringType,
	"listen_ssh":    types.BoolType,
	"mac":           types.StringType,
	"name":          types.StringType,
	"net_id":        types.Int64Type,
	"netmask":       types.Int64Type,
	"net_type":      types.StringType,
	"node_id":       types.Int64Type,
	"pci_slot":      types.Int64Type,
	"qos":           types.ObjectType{AttrTypes: ItemQos},
	"target":        types.StringType,
	"type":          types.StringType,
	"vnfs":          types.ListType{ElemType: types.Int64Type},
}
// ItemQos is the attr.Type registry for an interface QoS object
// (egress rate plus ingress rate/burst).
var ItemQos = map[string]attr.Type{
	"e_rate":   types.Int64Type,
	"guid":     types.StringType,
	"in_burst": types.Int64Type,
	"in_rate":  types.Int64Type,
}
// ItemOSUsers is the attr.Type registry for one guest-OS user entry.
var ItemOSUsers = map[string]attr.Type{
	"guid":       types.StringType,
	"login":      types.StringType,
	"password":   types.StringType,
	"public_key": types.StringType,
}
// ItemSnapSets is the attr.Type registry for one snapshot-set object
// (label, timestamp and covered disk IDs).
var ItemSnapSets = map[string]attr.Type{
	"disks":     types.ListType{ElemType: types.Int64Type},
	"guid":      types.StringType,
	"label":     types.StringType,
	"timestamp": types.Int64Type,
}

View File

@@ -0,0 +1,554 @@
package kvmvm
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
// Compile-time checks that resourceCompute satisfies the plugin-framework
// resource interfaces (including state import support).
var (
	_ resource.Resource                = &resourceCompute{}
	_ resource.ResourceWithImportState = &resourceCompute{}
)
// NewResourceCompute returns a fresh, unconfigured Compute resource
// implementation; the API client is injected later by the framework
// via Configure.
func NewResourceCompute() resource.Resource {
	var r resourceCompute
	return &r
}
// resourceCompute is the Compute resource implementation; it holds the decort
// API client used by all CRUD methods.
type resourceCompute struct {
	client *decort.DecortClient
}
// Create provisions a new Compute from the Terraform plan: it validates the
// plan against the platform, creates the base Compute, attaches extra disks
// (rolling the Compute back on failure), applies optional post-creation
// settings (enable/start, affinity, tags, port forwarding, user access,
// snapshots, CD, stack pinning, pause) as best-effort warnings, and finally
// flattens the live Compute into state.
func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create Compute
	var plan models.ResourceComputeModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: start creating", map[string]any{"name": plan.Name.ValueString()})
	// Set timeouts (default 30m for creation)
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout30m)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: set timeouts successfully", map[string]any{
		"name":          plan.Name.ValueString(),
		"createTimeout": createTimeout})
	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()
	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceCompute: starting input checks", map[string]any{"name": plan.Name.ValueString()})
	resp.Diagnostics.Append(resourceComputeInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: input checks successful", map[string]any{"name": plan.Name.ValueString()})
	// Make create request and get response for creation
	ComputeId, diags := utilities.CreateResourceCompute(ctx, &plan, r.client)
	if diags.HasError() {
		resp.Diagnostics.Append(diags...)
		tflog.Error(ctx, "Create resourceCompute: Error response for create resource Compute")
		return
	}
	plan.ID = types.StringValue(strconv.Itoa(int(ComputeId)))
	tflog.Info(ctx, "Create resourceCompute: new simple Compute created", map[string]any{"id": ComputeId, "name": plan.Name.ValueString()})
	// attach extra disk(s) to new compute; on failure the half-created Compute
	// is cleaned up and the ID cleared so the resource is not recorded in state
	if !plan.ExtraDisks.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceExtraDiskCreate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Create resourceCompute: error when attaching extra disk(s) to a new Compute ")
			utilities.CleanupResourceCompute(ctx, ComputeId, r.client)
			plan.ID = types.StringValue("")
			return
		}
	}
	// additional settings after Compute creation: in case of failures, warnings are added to resp.Diagnostics,
	// because additional settings failure is not critical. If errors were added instead of warnings, terraform
	// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
	// enable or disable Compute, warnings added to resp.Diagnostics in case of failure.
	resp.Diagnostics.Append(utilities.ComputeResourceEnableDisable(ctx, &plan, r.client)...)
	// Note bene: we created compute in a STOPPED state (this is required to properly attach 1st network interface),
	// now we need to start it before we report the sequence complete
	resp.Diagnostics.Append(utilities.ComputeResourceStartStop(ctx, &plan, r.client)...)
	// add affinity_label if needed, warnings added to resp.Diagnostics in case of failure.
	// NOTE(review): this guard uses IsUnknown() while the blocks below use IsNull() — confirm intended.
	if !plan.AffinityLabel.IsUnknown() { //affinity_label is optional
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityLabel(ctx, &plan, r.client)...)
	}
	// add affinity_rules if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.AffinityRules.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityRules(ctx, &plan, r.client)...)
	}
	// add anti_affinity_rules if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.AntiAffinityRules.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceAntiAffinityRules(ctx, &plan, r.client)...)
	}
	// add tags if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.Tags.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceTags(ctx, &plan, r.client)...)
	}
	// add port_forwarding if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.PortForwarding.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourcePortForwarding(ctx, &plan, r.client)...)
	}
	// add user_access if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.UserAccess.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceUserAccess(ctx, &plan, r.client)...)
	}
	// add snapshot if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.Snapshot.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceSnapshot(ctx, &plan, r.client)...)
	}
	// add cd if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.CD.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceCDInsert(ctx, &plan, r.client)...)
	}
	// pin to stack if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.PinToStack.IsNull() && plan.PinToStack.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...)
	}
	// pause if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.Pause.IsNull() && plan.Pause.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourcePause(ctx, &plan, r.client)...)
	}
	tflog.Info(ctx, "Create resourceCompute: resource creation is completed", map[string]any{"id": ComputeId})
	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.ComputeResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Read refreshes the Compute state: it checks the live status (restoring the
// Compute if necessary), re-flattens the platform view of the Compute into the
// model, and stores the refreshed state.
func (r *resourceCompute) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceComputeModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceCompute: got state successfully", map[string]any{"ID": state.ID.ValueString()})
	// Set timeouts (default 300s for read)
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceCompute: set timeouts successfully", map[string]any{
		"ID":          state.ID.ValueString(),
		"readTimeout": readTimeout})
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Read status Compute and if it is necessary to restore it
	resp.Diagnostics.Append(utilities.ComputeReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read status or restore")
		return
	}
	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.ComputeResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error flatten")
		return
	}
	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceCompute")
}
func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceComputeModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error receiving the plan")
return
}
tflog.Info(ctx, "Update resourceCompute: got plan successfully", map[string]any{"name": plan.Name.ValueString()})
// Retrieve values from state
var state models.ResourceComputeModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceCompute: got state successfully", map[string]any{"compute_id": state.ID.ValueString()})
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout30m)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Error set timeout")
return
}
tflog.Info(ctx, "Update resourceCompute: set timeouts successfully", map[string]any{
"ID": state.ID.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
// Checking for values in the platform
tflog.Info(ctx, "Update resourceCompute: starting input checks", map[string]any{"compute_id": state.ID.ValueString()})
resp.Diagnostics.Append(resourceComputeInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error input checks")
return
}
tflog.Info(ctx, "Update resourceCompute: input checks successful", map[string]any{"compute_id": state.ID.ValueString()})
// Read status Compute and if it is necessary to restore it
resp.Diagnostics.Append(utilities.ComputeReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Error read status or restore")
return
}
plan.ID = state.ID
// Enable/disable Compute if needed
if !plan.Enabled.Equal(state.Enabled) {
resp.Diagnostics.Append(utilities.ComputeResourceEnableDisable(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error enable/disable Compute")
return
}
}
// Start/stop Compute if needed
if !plan.Started.Equal(state.Started) {
resp.Diagnostics.Append(utilities.ComputeResourceStartStop(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error start/stop Compute")
return
}
}
// Resize Compute if needed
if !plan.CPU.Equal(state.CPU) || !plan.RAM.Equal(state.RAM) {
resp.Diagnostics.Append(utilities.ComputeResourceResize(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error resize Compute")
return
}
}
// Resize boot disk size if needed
if !plan.BootDiskSize.Equal(state.BootDiskSize) {
resp.Diagnostics.Append(utilities.ComputeResourceBootDiskResize(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error resize boot disk")
return
}
}
// Extra disk(s) update if needed
if !plan.ExtraDisks.Equal(state.ExtraDisks) {
resp.Diagnostics.Append(utilities.ComputeResourceExtraDiskUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update extra disk list")
return
}
}
// Network(s) update if needed
if !plan.Network.Equal(state.Network) && !plan.Network.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceNetworkUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update network(s)")
return
}
}
// Compute parameters update if needed
if (!plan.Description.IsUnknown() && !plan.Description.Equal(state.Description)) || !plan.Name.Equal(state.Name) ||
!plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) {
resp.Diagnostics.Append(utilities.ComputeResourceComputeUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update compute parameters")
return
}
}
// Affinity label update if needed
if !plan.AffinityLabel.Equal(state.AffinityLabel) && !plan.AffinityLabel.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceAffinityLabelUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update affinity label")
return
}
}
// Affinity rules update if needed
if !plan.AffinityRules.Equal(state.AffinityRules) {
resp.Diagnostics.Append(utilities.ComputeResourceAffinityRulesUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update affinity rules")
return
}
}
// Anti affinity rules update if needed
if !plan.AntiAffinityRules.Equal(state.AntiAffinityRules) {
resp.Diagnostics.Append(utilities.ComputeResourceAntiAffinityRulesUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update anti affinity rules")
return
}
}
// Tags update if needed
if !plan.Tags.Equal(state.Tags) {
resp.Diagnostics.Append(utilities.ComputeResourceTagsUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update tags")
return
}
}
// Port forwarding update if needed
if !plan.PortForwarding.Equal(state.PortForwarding) {
resp.Diagnostics.Append(utilities.ComputeResourcePortForwardingUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update port forwarding rules")
return
}
}
// User access update if needed
if !plan.UserAccess.Equal(state.UserAccess) {
resp.Diagnostics.Append(utilities.ComputeResourceUserAccessUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update user(s) access rules")
return
}
}
// Snapshot update if needed
if !plan.Snapshot.Equal(state.Snapshot) {
resp.Diagnostics.Append(utilities.ComputeResourceSnapshotUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update snapshot(s)")
return
}
}
// Rollback if needed
if !plan.Rollback.Equal(state.Rollback) && !plan.Rollback.IsNull() {
resp.Diagnostics.Append(utilities.ComputeResourceRollback(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error rollback compute")
return
}
}
// Cd update if needed
if !plan.CD.Equal(state.CD) {
resp.Diagnostics.Append(utilities.ComputeResourceCDUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update cd image update")
return
}
}
// pin to stack if needed
if !plan.PinToStack.Equal(state.PinToStack) && !plan.PinToStack.IsNull() {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStackUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error pin/unpin to stack compute")
return
}
}
// compute pause if need
if !plan.Pause.Equal(state.Pause) && !plan.Pause.IsNull() {
resp.Diagnostics.Append(utilities.ComputeResourcePauseResumeCompute(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error pause/resume compute")
return
}
}
// reset compute if need
if !plan.Reset.Equal(state.Reset) && plan.Reset.ValueBool() {
resp.Diagnostics.Append(utilities.ComputeResourceResetCompute(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error reset compute")
return
}
}
// redeploy compute if need
if !plan.ImageID.Equal(state.ImageID) {
resp.Diagnostics.Append(utilities.ComputeResourceRedeploy(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error redeploy compute")
return
}
}
// custom fields update if needed
if !plan.CustomFields.Equal(state.CustomFields) && !plan.CustomFields.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceCustomFieldUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update custom fields")
return
}
}
tflog.Info(ctx, "Update resourceCompute: resource update is completed", map[string]any{"ID": plan.ID.ValueString()})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.ComputeResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete removes the compute from the platform.
// When the "permanently" or "detach_disks" attributes are absent from state
// (null), they default to true: permanent deletion with disks detached first.
func (r *resourceCompute) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceComputeModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceCompute: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceCompute: got state successfully", map[string]any{"ID": state.ID.ValueString()})

	// Set timeouts (renamed from readTimeout: this is the Delete timeout).
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceCompute: set timeouts successfully", map[string]any{
		"id":            state.ID.ValueString(),
		"deleteTimeout": deleteTimeout})
	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()

	// Null means "use the default", which is true for both flags.
	permanently := state.Permanently.IsNull() || state.Permanently.ValueBool()
	detach := state.DetachDisks.IsNull() || state.DetachDisks.ValueBool()

	// Delete existing Compute
	delReq := compute.DeleteRequest{
		ComputeID:   uint64(state.ComputeId.ValueInt64()),
		Permanently: permanently,
		DetachDisks: detach,
	}

	tflog.Info(ctx, "Delete resourceCompute: calling CloudAPI().Compute().Delete", map[string]any{
		"ID":  state.ID.ValueString(),
		"req": delReq,
	})
	_, err := r.client.CloudAPI().Compute().Delete(ctx, delReq)
	if err != nil {
		resp.Diagnostics.AddError("Delete resourceCompute: Error deleting", err.Error())
		return
	}

	tflog.Info(ctx, "End delete resource Compute ", map[string]any{"id": state.ID.ValueString()})
}
// Schema defines the attribute set of the kvmvm resource plus a configurable
// "timeouts" block supporting all four CRUD operations.
func (r *resourceCompute) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	timeoutOpts := timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx, timeoutOpts),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceCompute(),
		Blocks:     blocks,
	}
}
// Metadata sets the full resource type name: the provider type name with the
// "_kvmvm" suffix appended.
func (r *resourceCompute) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_kvmvm"
}
// Configure adds the provider configured client to the resource.
func (r *resourceCompute) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceCompute")
	// resp is handed to the helper so it can surface configuration diagnostics.
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceCompute successfully")
}
// ImportState imports an existing compute into Terraform state, writing the
// user-supplied import ID verbatim into the "id" attribute.
func (r *resourceCompute) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,670 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// dsComputeACLObject describes one ACL entry. The identical shape was
// previously duplicated across account_acl, compute_acl and rg_acl.
func dsComputeACLObject() schema.NestedAttributeObject {
	return schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"explicit":      schema.BoolAttribute{Computed: true},
			"guid":          schema.StringAttribute{Computed: true},
			"right":         schema.StringAttribute{Computed: true},
			"status":        schema.StringAttribute{Computed: true},
			"type":          schema.StringAttribute{Computed: true},
			"user_group_id": schema.StringAttribute{Computed: true},
		},
	}
}

// dsComputeRuleObject describes one affinity/anti-affinity rule, shared by the
// affinity_rules and anti_affinity_rules lists.
func dsComputeRuleObject() schema.NestedAttributeObject {
	return schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"guid":     schema.StringAttribute{Computed: true},
			"key":      schema.StringAttribute{Computed: true},
			"mode":     schema.StringAttribute{Computed: true},
			"policy":   schema.StringAttribute{Computed: true},
			"topology": schema.StringAttribute{Computed: true},
			"value":    schema.StringAttribute{Computed: true},
		},
	}
}

// MakeSchemaDataSourceCompute returns the attribute map for the compute data
// source: "compute_id" is the only required input; every other attribute is
// computed from the platform response.
func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{Required: true},
		// computed attributes
		"id": schema.StringAttribute{Computed: true},
		"acl": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"account_acl": schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeACLObject()},
				"compute_acl": schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeACLObject()},
				"rg_acl":      schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeACLObject()},
			},
		},
		"account_id":          schema.Int64Attribute{Computed: true},
		"account_name":        schema.StringAttribute{Computed: true},
		"affinity_label":      schema.StringAttribute{Computed: true},
		"affinity_rules":      schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeRuleObject()},
		"affinity_weight":     schema.Int64Attribute{Computed: true},
		"anti_affinity_rules": schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeRuleObject()},
		"arch":                schema.StringAttribute{Computed: true},
		"boot_order":          schema.ListAttribute{Computed: true, ElementType: types.StringType},
		"bootdisk_size":       schema.Int64Attribute{Computed: true},
		"cd_image_id":         schema.Int64Attribute{Computed: true},
		"clone_reference":     schema.Int64Attribute{Computed: true},
		"clones":              schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
		"computeci_id":        schema.Int64Attribute{Computed: true},
		"cpu_pin":             schema.BoolAttribute{Computed: true},
		"cpus":                schema.Int64Attribute{Computed: true},
		"created_by":          schema.StringAttribute{Computed: true},
		"created_time":        schema.Int64Attribute{Computed: true},
		"custom_fields":       schema.StringAttribute{Computed: true},
		"deleted_by":          schema.StringAttribute{Computed: true},
		"deleted_time":        schema.Int64Attribute{Computed: true},
		"desc":                schema.StringAttribute{Computed: true},
		"devices":             schema.StringAttribute{Computed: true},
		"disks": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"ckey":             schema.StringAttribute{Computed: true},
					"acl":              schema.StringAttribute{Computed: true},
					"account_id":       schema.Int64Attribute{Computed: true},
					"boot_partition":   schema.Int64Attribute{Computed: true},
					"created_time":     schema.Int64Attribute{Computed: true},
					"deleted_time":     schema.Int64Attribute{Computed: true},
					"desc":             schema.StringAttribute{Computed: true},
					"destruction_time": schema.Int64Attribute{Computed: true},
					"disk_path":        schema.StringAttribute{Computed: true},
					"gid":              schema.Int64Attribute{Computed: true},
					"guid":             schema.Int64Attribute{Computed: true},
					"disk_id":          schema.Int64Attribute{Computed: true},
					"image_id":         schema.Int64Attribute{Computed: true},
					"images":           schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					// per-disk I/O limits
					"iotune": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"read_bytes_sec":      schema.Int64Attribute{Computed: true},
							"read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
							"read_iops_sec":       schema.Int64Attribute{Computed: true},
							"read_iops_sec_max":   schema.Int64Attribute{Computed: true},
							"size_iops_sec":       schema.Int64Attribute{Computed: true},
							"total_bytes_sec":     schema.Int64Attribute{Computed: true},
							"total_bytes_sec_max": schema.Int64Attribute{Computed: true},
							"total_iops_sec":      schema.Int64Attribute{Computed: true},
							"total_iops_sec_max":  schema.Int64Attribute{Computed: true},
							"write_bytes_sec":     schema.Int64Attribute{Computed: true},
							"write_bytes_sec_max": schema.Int64Attribute{Computed: true},
							"write_iops_sec":      schema.Int64Attribute{Computed: true},
							"write_iops_sec_max":  schema.Int64Attribute{Computed: true},
						},
					},
					"iqn":        schema.StringAttribute{Computed: true},
					"login":      schema.StringAttribute{Computed: true},
					"milestones": schema.Int64Attribute{Computed: true},
					"name":       schema.StringAttribute{Computed: true},
					"order":      schema.Int64Attribute{Computed: true},
					"params":     schema.StringAttribute{Computed: true},
					"parent_id":  schema.Int64Attribute{Computed: true},
					"passwd":     schema.StringAttribute{Computed: true},
					"pci_slot":   schema.Int64Attribute{Computed: true},
					"pool":       schema.StringAttribute{Computed: true},
					"present_to": schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					"purge_time": schema.Int64Attribute{Computed: true},
					"replication": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"disk_id":        schema.Int64Attribute{Computed: true},
							"pool_id":        schema.StringAttribute{Computed: true},
							"role":           schema.StringAttribute{Computed: true},
							"self_volume_id": schema.StringAttribute{Computed: true},
							"storage_id":     schema.StringAttribute{Computed: true},
							"volume_id":      schema.StringAttribute{Computed: true},
						},
					},
					"reality_device_number": schema.Int64Attribute{Computed: true},
					"reference_id":          schema.StringAttribute{Computed: true},
					"res_id":                schema.StringAttribute{Computed: true},
					"role":                  schema.StringAttribute{Computed: true},
					"sep_id":                schema.Int64Attribute{Computed: true},
					"shareable":             schema.BoolAttribute{Computed: true},
					"size_max":              schema.Int64Attribute{Computed: true},
					"size_used":             schema.Float64Attribute{Computed: true},
					"snapshots": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid":          schema.StringAttribute{Computed: true},
								"label":         schema.StringAttribute{Computed: true},
								"reference_id":  schema.StringAttribute{Computed: true},
								"res_id":        schema.StringAttribute{Computed: true},
								"snap_set_guid": schema.StringAttribute{Computed: true},
								"snap_set_time": schema.Int64Attribute{Computed: true},
								"timestamp":     schema.Int64Attribute{Computed: true},
							},
						},
					},
					"status":      schema.StringAttribute{Computed: true},
					"tech_status": schema.StringAttribute{Computed: true},
					"type":        schema.StringAttribute{Computed: true},
					"vmid":        schema.Int64Attribute{Computed: true},
				},
			},
		},
		"driver":     schema.StringAttribute{Computed: true},
		"gid":        schema.Int64Attribute{Computed: true},
		"guid":       schema.Int64Attribute{Computed: true},
		"hp_backed":  schema.BoolAttribute{Computed: true},
		"image_id":   schema.Int64Attribute{Computed: true},
		"image_name": schema.StringAttribute{Computed: true},
		"interfaces": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"conn_id":       schema.Int64Attribute{Computed: true},
					"conn_type":     schema.StringAttribute{Computed: true},
					"def_gw":        schema.StringAttribute{Computed: true},
					"enabled":       schema.BoolAttribute{Computed: true},
					"flip_group_id": schema.Int64Attribute{Computed: true},
					"guid":          schema.StringAttribute{Computed: true},
					"ip_address":    schema.StringAttribute{Computed: true},
					"listen_ssh":    schema.BoolAttribute{Computed: true},
					"mac":           schema.StringAttribute{Computed: true},
					"name":          schema.StringAttribute{Computed: true},
					"net_id":        schema.Int64Attribute{Computed: true},
					"netmask":       schema.Int64Attribute{Computed: true},
					"net_type":      schema.StringAttribute{Computed: true},
					"node_id":       schema.Int64Attribute{Computed: true},
					"pci_slot":      schema.Int64Attribute{Computed: true},
					"qos": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"e_rate":   schema.Int64Attribute{Computed: true},
							"guid":     schema.StringAttribute{Computed: true},
							"in_burst": schema.Int64Attribute{Computed: true},
							"in_rate":  schema.Int64Attribute{Computed: true},
						},
					},
					"target": schema.StringAttribute{Computed: true},
					"type":   schema.StringAttribute{Computed: true},
					"vnfs":   schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
				},
			},
		},
		"lock_status":               schema.StringAttribute{Computed: true},
		"manager_id":                schema.Int64Attribute{Computed: true},
		"manager_type":              schema.StringAttribute{Computed: true},
		"migrationjob":              schema.Int64Attribute{Computed: true},
		"milestones":                schema.Int64Attribute{Computed: true},
		"name":                      schema.StringAttribute{Computed: true},
		"need_reboot":               schema.BoolAttribute{Computed: true},
		"numa_affinity":             schema.StringAttribute{Computed: true},
		"numa_node_id":              schema.Int64Attribute{Computed: true},
		"natable_vins_id":           schema.Int64Attribute{Computed: true},
		"natable_vins_ip":           schema.StringAttribute{Computed: true},
		"natable_vins_name":         schema.StringAttribute{Computed: true},
		"natable_vins_network":      schema.StringAttribute{Computed: true},
		"natable_vins_network_name": schema.StringAttribute{Computed: true},
		"os_users": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid":       schema.StringAttribute{Computed: true},
					"login":      schema.StringAttribute{Computed: true},
					"password":   schema.StringAttribute{Computed: true},
					"public_key": schema.StringAttribute{Computed: true},
				},
			},
		},
		"pinned":             schema.BoolAttribute{Computed: true},
		"ram":                schema.Int64Attribute{Computed: true},
		"reference_id":       schema.StringAttribute{Computed: true},
		"registered":         schema.BoolAttribute{Computed: true},
		"res_name":           schema.StringAttribute{Computed: true},
		"reserved_node_cpus": schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
		"rg_id":              schema.Int64Attribute{Computed: true},
		"rg_name":            schema.StringAttribute{Computed: true},
		"snap_sets": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"disks":     schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					"guid":      schema.StringAttribute{Computed: true},
					"label":     schema.StringAttribute{Computed: true},
					"timestamp": schema.Int64Attribute{Computed: true},
				},
			},
		},
		"stateless_sep_id":   schema.Int64Attribute{Computed: true},
		"stateless_sep_type": schema.StringAttribute{Computed: true},
		"status":             schema.StringAttribute{Computed: true},
		"tags":               schema.MapAttribute{Computed: true, ElementType: types.StringType},
		"tech_status":        schema.StringAttribute{Computed: true},
		"updated_by":         schema.StringAttribute{Computed: true},
		"updated_time":       schema.Int64Attribute{Computed: true},
		"user_data":          schema.StringAttribute{Computed: true},
		"user_managed":       schema.BoolAttribute{Computed: true},
		"vgpus":              schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
		"virtual_image_id":   schema.Int64Attribute{Computed: true},
		"virtual_image_name": schema.StringAttribute{Computed: true},
	}
}

View File

@@ -0,0 +1,40 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeAudits returns the attribute map for the compute
// audits data source: "compute_id" is required, the audit records are returned
// in the computed "items" list.
func MakeSchemaDataSourceComputeAudits() map[string]schema.Attribute {
	auditObject := schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"call":         schema.StringAttribute{Computed: true},
			"responsetime": schema.Float64Attribute{Computed: true},
			"statuscode":   schema.Int64Attribute{Computed: true},
			"timestamp":    schema.Float64Attribute{Computed: true},
			"user":         schema.StringAttribute{Computed: true},
		},
	}
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{Required: true},
		// computed attributes
		"id":    schema.StringAttribute{Computed: true},
		"items": schema.ListNestedAttribute{Computed: true, NestedObject: auditObject},
	}
}

View File

@@ -0,0 +1,31 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeGetAudits returns the attribute map for the
// compute get-audits data source: "compute_id" is required, each item carries
// an epoch timestamp and a message.
func MakeSchemaDataSourceComputeGetAudits() map[string]schema.Attribute {
	itemObject := schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"epoch":   schema.Float64Attribute{Computed: true},
			"message": schema.StringAttribute{Computed: true},
		},
	}
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{Required: true},
		// computed attributes
		"id":    schema.StringAttribute{Computed: true},
		"items": schema.ListNestedAttribute{Computed: true, NestedObject: itemObject},
	}
}

View File

@@ -0,0 +1,21 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeGetConsoleUrl returns the attribute map for the
// compute console-URL data source: "compute_id" in, "console_url" out.
func MakeSchemaDataSourceComputeGetConsoleUrl() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{Required: true},
		// computed attributes
		"id":          schema.StringAttribute{Computed: true},
		"console_url": schema.StringAttribute{Computed: true},
	}
}

View File

@@ -0,0 +1,24 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeGetLog returns the attribute map for the compute
// log data source: "compute_id" and the log file "path" are required, the log
// contents come back in the computed "log" attribute.
func MakeSchemaDataSourceComputeGetLog() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{Required: true},
		"path":       schema.StringAttribute{Required: true},
		// computed attributes
		"id":  schema.StringAttribute{Computed: true},
		"log": schema.StringAttribute{Computed: true},
	}
}

View File

@@ -0,0 +1,443 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// dsComputeListRuleObject describes one affinity/anti-affinity rule, shared by
// the affinity_rules and anti_affinity_rules item lists (previously the same
// literal was duplicated inline).
func dsComputeListRuleObject() schema.NestedAttributeObject {
	return schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"guid":     schema.StringAttribute{Computed: true},
			"key":      schema.StringAttribute{Computed: true},
			"mode":     schema.StringAttribute{Computed: true},
			"policy":   schema.StringAttribute{Computed: true},
			"topology": schema.StringAttribute{Computed: true},
			"value":    schema.StringAttribute{Computed: true},
		},
	}
}

// MakeSchemaDataSourceComputeList returns the attribute map for the compute
// list data source: every input is an optional filter/paging parameter; the
// result is the computed "items" list plus "entry_count".
func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id":          schema.Int64Attribute{Optional: true, Description: "Find by ID"},
		"name":           schema.StringAttribute{Optional: true, Description: "Find by name"},
		"account_id":     schema.Int64Attribute{Optional: true, Description: "Find by AccountID"},
		"rg_name":        schema.StringAttribute{Optional: true, Description: "Find by resgroup name"},
		"rg_id":          schema.Int64Attribute{Optional: true, Description: "Find by RGID"},
		"tech_status":    schema.StringAttribute{Optional: true, Description: "Find by tech status"},
		"status":         schema.StringAttribute{Optional: true, Description: "Find by status"},
		"ip_address":     schema.StringAttribute{Optional: true, Description: "Find by IP address"},
		"extnet_name":    schema.StringAttribute{Optional: true, Description: "Find by Extnet name"},
		"extnet_id":      schema.Int64Attribute{Optional: true, Description: "Find by Extnet ID"},
		"includedeleted": schema.BoolAttribute{Optional: true, Description: "Include deleted computes. If using field 'status', then includedeleted will be ignored"},
		"sort_by":        schema.StringAttribute{Optional: true, Description: "sort by one of supported fields, format +|-(field)"},
		"page":           schema.Int64Attribute{Optional: true, Description: "Page number"},
		"size":           schema.Int64Attribute{Optional: true, Description: "Page size"},
		"ignore_k8s":     schema.BoolAttribute{Optional: true, Description: "If set to true, ignores any VMs associated with any k8s cluster"},
		// computed attributes
		"id": schema.StringAttribute{Computed: true},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"acl": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"explicit":      schema.BoolAttribute{Computed: true},
								"guid":          schema.StringAttribute{Computed: true},
								"right":         schema.StringAttribute{Computed: true},
								"status":        schema.StringAttribute{Computed: true},
								"type":          schema.StringAttribute{Computed: true},
								"user_group_id": schema.StringAttribute{Computed: true},
							},
						},
					},
					"account_id":          schema.Int64Attribute{Computed: true},
					"account_name":        schema.StringAttribute{Computed: true},
					"affinity_label":      schema.StringAttribute{Computed: true},
					"affinity_rules":      schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeListRuleObject()},
					"affinity_weight":     schema.Int64Attribute{Computed: true},
					"anti_affinity_rules": schema.ListNestedAttribute{Computed: true, NestedObject: dsComputeListRuleObject()},
					"arch":                schema.StringAttribute{Computed: true},
					"boot_order":          schema.ListAttribute{Computed: true, ElementType: types.StringType},
					"bootdisk_size":       schema.Int64Attribute{Computed: true},
					"cd_image_id":         schema.Int64Attribute{Computed: true},
					"clone_reference":     schema.Int64Attribute{Computed: true},
					"clones":              schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					"computeci_id":        schema.Int64Attribute{Computed: true},
					"cpu_pin":             schema.BoolAttribute{Computed: true},
					"cpus":                schema.Int64Attribute{Computed: true},
					"created_by":          schema.StringAttribute{Computed: true},
					"created_time":        schema.Int64Attribute{Computed: true},
					"custom_fields":       schema.StringAttribute{Computed: true},
					"deleted_by":          schema.StringAttribute{Computed: true},
					"deleted_time":        schema.Int64Attribute{Computed: true},
					"desc":                schema.StringAttribute{Computed: true},
					"devices":             schema.StringAttribute{Computed: true},
					// list view only exposes disk id and PCI slot per disk
					"disks": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"disk_id":  schema.Int64Attribute{Computed: true},
								"pci_slot": schema.Int64Attribute{Computed: true},
							},
						},
					},
					"driver":     schema.StringAttribute{Computed: true},
					"gid":        schema.Int64Attribute{Computed: true},
					"guid":       schema.Int64Attribute{Computed: true},
					"hp_backed":  schema.BoolAttribute{Computed: true},
					"compute_id": schema.Int64Attribute{Computed: true},
					"image_id":   schema.Int64Attribute{Computed: true},
					"interfaces": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"conn_id":       schema.Int64Attribute{Computed: true},
								"conn_type":     schema.StringAttribute{Computed: true},
								"def_gw":        schema.StringAttribute{Computed: true},
								"enabled":       schema.BoolAttribute{Computed: true},
								"flip_group_id": schema.Int64Attribute{Computed: true},
								"guid":          schema.StringAttribute{Computed: true},
								"ip_address":    schema.StringAttribute{Computed: true},
								"listen_ssh":    schema.BoolAttribute{Computed: true},
								"mac":           schema.StringAttribute{Computed: true},
								"name":          schema.StringAttribute{Computed: true},
								"net_id":        schema.Int64Attribute{Computed: true},
								"netmask":       schema.Int64Attribute{Computed: true},
								"net_type":      schema.StringAttribute{Computed: true},
								"node_id":       schema.Int64Attribute{Computed: true},
								"pci_slot":      schema.Int64Attribute{Computed: true},
								"qos": schema.SingleNestedAttribute{
									Computed: true,
									Attributes: map[string]schema.Attribute{
										"e_rate":   schema.Int64Attribute{Computed: true},
										"guid":     schema.StringAttribute{Computed: true},
										"in_burst": schema.Int64Attribute{Computed: true},
										"in_rate":  schema.Int64Attribute{Computed: true},
									},
								},
								"target": schema.StringAttribute{Computed: true},
								"type":   schema.StringAttribute{Computed: true},
								"vnfs":   schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
							},
						},
					},
					"lock_status":        schema.StringAttribute{Computed: true},
					"manager_id":         schema.Int64Attribute{Computed: true},
					"manager_type":       schema.StringAttribute{Computed: true},
					"migrationjob":       schema.Int64Attribute{Computed: true},
					"milestones":         schema.Int64Attribute{Computed: true},
					"name":               schema.StringAttribute{Computed: true},
					"need_reboot":        schema.BoolAttribute{Computed: true},
					"numa_affinity":      schema.StringAttribute{Computed: true},
					"numa_node_id":       schema.Int64Attribute{Computed: true},
					"pinned":             schema.BoolAttribute{Computed: true},
					"ram":                schema.Int64Attribute{Computed: true},
					"reference_id":       schema.StringAttribute{Computed: true},
					"registered":         schema.BoolAttribute{Computed: true},
					"res_name":           schema.StringAttribute{Computed: true},
					"reserved_node_cpus": schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					"rg_id":              schema.Int64Attribute{Computed: true},
					"rg_name":            schema.StringAttribute{Computed: true},
					"snap_sets": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"disks":     schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
								"guid":      schema.StringAttribute{Computed: true},
								"label":     schema.StringAttribute{Computed: true},
								"timestamp": schema.Int64Attribute{Computed: true},
							},
						},
					},
					"stateless_sep_id":   schema.Int64Attribute{Computed: true},
					"stateless_sep_type": schema.StringAttribute{Computed: true},
					"status":             schema.StringAttribute{Computed: true},
					"tags":               schema.MapAttribute{Computed: true, ElementType: types.StringType},
					"tech_status":        schema.StringAttribute{Computed: true},
					"total_disks_size":   schema.Int64Attribute{Computed: true},
					"updated_by":         schema.StringAttribute{Computed: true},
					"updated_time":       schema.Int64Attribute{Computed: true},
					"user_managed":       schema.BoolAttribute{Computed: true},
					"vgpus":              schema.ListAttribute{Computed: true, ElementType: types.Int64Type},
					"vins_connected":     schema.Int64Attribute{Computed: true},
					"virtual_image_id":   schema.Int64Attribute{Computed: true},
				},
			},
		},
		"entry_count": schema.Int64Attribute{Computed: true},
	}
}

View File

@@ -0,0 +1,435 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceComputeListDeleted returns the Terraform attribute schema
// for the data source that lists deleted computes. The optional attributes are
// request-side filters and paging parameters forwarded to the cloud API list
// call; "items" mirrors the compute records returned by the API and is fully
// computed, and "entry_count" reports the total number of matching entries.
func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by ID",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by name",
		},
		"account_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by AccountID",
		},
		"rg_name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by resgroup name",
		},
		"rg_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by RGID",
		},
		"tech_status": schema.StringAttribute{
			Optional:    true,
			Description: "Find by tech status",
		},
		"ip_address": schema.StringAttribute{
			Optional:    true,
			Description: "Find by IP address",
		},
		"extnet_name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by Extnet name",
		},
		"extnet_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by Extnet ID",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "Page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "Page size",
		},
		"ignore_k8s": schema.BoolAttribute{
			Optional:    true,
			Description: "If set to true, ignores any VMs associated with any k8s cluster",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		// items: one element per deleted compute returned by the API.
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"acl": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"explicit": schema.BoolAttribute{
									Computed: true,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"right": schema.StringAttribute{
									Computed: true,
								},
								"status": schema.StringAttribute{
									Computed: true,
								},
								"type": schema.StringAttribute{
									Computed: true,
								},
								"user_group_id": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"affinity_label": schema.StringAttribute{
						Computed: true,
					},
					"affinity_rules": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"key": schema.StringAttribute{
									Computed: true,
								},
								"mode": schema.StringAttribute{
									Computed: true,
								},
								"policy": schema.StringAttribute{
									Computed: true,
								},
								"topology": schema.StringAttribute{
									Computed: true,
								},
								"value": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"affinity_weight": schema.Int64Attribute{
						Computed: true,
					},
					"anti_affinity_rules": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"key": schema.StringAttribute{
									Computed: true,
								},
								"mode": schema.StringAttribute{
									Computed: true,
								},
								"policy": schema.StringAttribute{
									Computed: true,
								},
								"topology": schema.StringAttribute{
									Computed: true,
								},
								"value": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"arch": schema.StringAttribute{
						Computed: true,
					},
					"boot_order": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"bootdisk_size": schema.Int64Attribute{
						Computed: true,
					},
					"cd_image_id": schema.Int64Attribute{
						Computed: true,
					},
					"clone_reference": schema.Int64Attribute{
						Computed: true,
					},
					"clones": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"computeci_id": schema.Int64Attribute{
						Computed: true,
					},
					"cpu_pin": schema.BoolAttribute{
						Computed: true,
					},
					"cpus": schema.Int64Attribute{
						Computed: true,
					},
					"created_by": schema.StringAttribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"custom_fields": schema.StringAttribute{
						Computed: true,
					},
					"deleted_by": schema.StringAttribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"devices": schema.StringAttribute{
						Computed: true,
					},
					"disks": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"disk_id": schema.Int64Attribute{
									Computed: true,
								},
								"pci_slot": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
					"driver": schema.StringAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"hp_backed": schema.BoolAttribute{
						Computed: true,
					},
					"compute_id": schema.Int64Attribute{
						Computed: true,
					},
					"image_id": schema.Int64Attribute{
						Computed: true,
					},
					"interfaces": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"conn_id": schema.Int64Attribute{
									Computed: true,
								},
								"conn_type": schema.StringAttribute{
									Computed: true,
								},
								"def_gw": schema.StringAttribute{
									Computed: true,
								},
								"enabled": schema.BoolAttribute{
									Computed: true,
								},
								"flip_group_id": schema.Int64Attribute{
									Computed: true,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"ip_address": schema.StringAttribute{
									Computed: true,
								},
								"listen_ssh": schema.BoolAttribute{
									Computed: true,
								},
								"mac": schema.StringAttribute{
									Computed: true,
								},
								"name": schema.StringAttribute{
									Computed: true,
								},
								"net_id": schema.Int64Attribute{
									Computed: true,
								},
								"netmask": schema.Int64Attribute{
									Computed: true,
								},
								"net_type": schema.StringAttribute{
									Computed: true,
								},
								"node_id": schema.Int64Attribute{
									Computed: true,
								},
								"pci_slot": schema.Int64Attribute{
									Computed: true,
								},
								"qos": schema.SingleNestedAttribute{
									Computed: true,
									Attributes: map[string]schema.Attribute{
										"e_rate": schema.Int64Attribute{
											Computed: true,
										},
										"guid": schema.StringAttribute{
											Computed: true,
										},
										"in_burst": schema.Int64Attribute{
											Computed: true,
										},
										"in_rate": schema.Int64Attribute{
											Computed: true,
										},
									},
								},
								"target": schema.StringAttribute{
									Computed: true,
								},
								"type": schema.StringAttribute{
									Computed: true,
								},
								"vnfs": schema.ListAttribute{
									Computed:    true,
									ElementType: types.Int64Type,
								},
							},
						},
					},
					"lock_status": schema.StringAttribute{
						Computed: true,
					},
					"manager_id": schema.Int64Attribute{
						Computed: true,
					},
					"manager_type": schema.StringAttribute{
						Computed: true,
					},
					"migrationjob": schema.Int64Attribute{
						Computed: true,
					},
					"milestones": schema.Int64Attribute{
						Computed: true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"need_reboot": schema.BoolAttribute{
						Computed: true,
					},
					"numa_affinity": schema.StringAttribute{
						Computed: true,
					},
					"numa_node_id": schema.Int64Attribute{
						Computed: true,
					},
					"pinned": schema.BoolAttribute{
						Computed: true,
					},
					"ram": schema.Int64Attribute{
						Computed: true,
					},
					"reference_id": schema.StringAttribute{
						Computed: true,
					},
					"registered": schema.BoolAttribute{
						Computed: true,
					},
					"res_name": schema.StringAttribute{
						Computed: true,
					},
					"reserved_node_cpus": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"rg_name": schema.StringAttribute{
						Computed: true,
					},
					"snap_sets": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"disks": schema.ListAttribute{
									Computed:    true,
									ElementType: types.Int64Type,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"label": schema.StringAttribute{
									Computed: true,
								},
								"timestamp": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
					"stateless_sep_id": schema.Int64Attribute{
						Computed: true,
					},
					"stateless_sep_type": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"tags": schema.MapAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"tech_status": schema.StringAttribute{
						Computed: true,
					},
					"total_disks_size": schema.Int64Attribute{
						Computed: true,
					},
					"updated_by": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
					"user_managed": schema.BoolAttribute{
						Computed: true,
					},
					"vgpus": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"vins_connected": schema.Int64Attribute{
						Computed: true,
					},
					"virtual_image_id": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,81 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputePciDeviceList returns the Terraform attribute
// schema for the data source listing PCI devices of a compute. "compute_id"
// is the only required input; the remaining optional attributes are API-side
// filters and paging options. "items" holds the PCI device records returned
// by the API and "entry_count" the total number of matching entries.
func MakeSchemaDataSourceComputePciDeviceList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{
			Required: true,
		},
		// optional attributes
		"rg_id": schema.Int64Attribute{
			Optional: true,
		},
		"device_id": schema.Int64Attribute{
			Optional: true,
		},
		"name": schema.StringAttribute{
			Optional: true,
		},
		"status": schema.StringAttribute{
			Optional: true,
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional: true,
		},
		"size": schema.Int64Attribute{
			Optional: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.Int64Attribute{
						Computed: true,
					},
					"description": schema.StringAttribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"hwpath": schema.StringAttribute{
						Computed: true,
					},
					"device_id": schema.Int64Attribute{
						Computed: true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"stack_id": schema.Int64Attribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"system_name": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,49 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputePFWList returns the Terraform attribute schema
// for the data source listing port-forwarding rules (PFW) of a compute.
// "compute_id" selects the compute; "items" holds the rules returned by the
// API and "entry_count" the total number of entries.
func MakeSchemaDataSourceComputePFWList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{
			Required: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"pfw_id": schema.Int64Attribute{
						Computed: true,
					},
					"local_ip": schema.StringAttribute{
						Computed: true,
					},
					"local_port": schema.Int64Attribute{
						Computed: true,
					},
					"protocol": schema.StringAttribute{
						Computed: true,
					},
					"public_port_end": schema.Int64Attribute{
						Computed: true,
					},
					"public_port_start": schema.Int64Attribute{
						Computed: true,
					},
					"vm_id": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,41 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeSnapshotUsage returns the Terraform attribute
// schema for the data source reporting snapshot storage usage of a compute.
// "compute_id" is required; "label" optionally narrows the report to a single
// snapshot label. "items" carries the usage records returned by the API.
//
// NOTE(review): unlike the sibling list data sources in this package, this
// schema has no "entry_count" attribute — confirm against the corresponding
// model/flatten code whether that is intentional.
func MakeSchemaDataSourceComputeSnapshotUsage() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{
			Required: true,
		},
		// optional attributes
		"label": schema.StringAttribute{
			Optional: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"count": schema.Int64Attribute{
						Computed: true,
					},
					// stored: snapshot storage volume as a floating-point value.
					"stored": schema.Float64Attribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
	}
}

View File

@@ -0,0 +1,101 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeUserList returns the Terraform attribute schema
// for the data source listing user access (ACLs) of a compute. "compute_id"
// is required. Note that "items" here is a single nested object grouping the
// account-, compute- and resource-group-level ACL lists (not a list as in the
// other list data sources of this package).
func MakeSchemaDataSourceComputeUserList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{
			Required: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"account_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
				"compute_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
				"rg_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,105 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceComputeVGPUList returns the Terraform attribute schema
// for the data source listing vGPUs attached to a compute. "compute_id" is
// required; the optional attributes are API-side filters and paging options.
// "items" holds the vGPU records returned by the API and "entry_count" the
// total number of matching entries.
func MakeSchemaDataSourceComputeVGPUList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"compute_id": schema.Int64Attribute{
			Required: true,
		},
		// optional attributes
		"gpu_id": schema.Int64Attribute{
			Optional: true,
		},
		"type": schema.StringAttribute{
			Optional: true,
		},
		"status": schema.StringAttribute{
			Optional: true,
		},
		"includedeleted": schema.BoolAttribute{
			Optional: true,
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional: true,
		},
		"size": schema.Int64Attribute{
			Optional: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"vgpu_id": schema.Int64Attribute{
						Computed: true,
					},
					"last_claimed_by": schema.Int64Attribute{
						Computed: true,
					},
					"last_update_time": schema.Int64Attribute{
						Computed: true,
					},
					"mode": schema.StringAttribute{
						Computed: true,
					},
					"pci_slot": schema.Int64Attribute{
						Computed: true,
					},
					"pgpuid": schema.Int64Attribute{
						Computed: true,
					},
					"profile_id": schema.Int64Attribute{
						Computed: true,
					},
					"ram": schema.Int64Attribute{
						Computed: true,
					},
					"reference_id": schema.StringAttribute{
						Computed: true,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"vm_id": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,952 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/validate"
)
// MakeSchemaResourceCompute returns the Terraform attribute schema for the
// compute resource. Required attributes identify and size the compute; the
// optional attributes configure placement, networking, lifecycle actions and
// snapshots; the computed attributes mirror the compute record returned by
// the cloud API after apply. Fix in this revision: corrected the user-facing
// typo "accomodate" -> "accommodate" in the boot_disk_size description.
func MakeSchemaResourceCompute() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"name": schema.StringAttribute{
			Required:    true,
			Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
		},
		"rg_id": schema.Int64Attribute{
			Required: true,
			Validators: []validator.Int64{
				int64validator.AtLeast(1),
			},
			Description: "ID of the resource group where this compute should be deployed.",
		},
		"driver": schema.StringAttribute{
			Required: true,
			Validators: []validator.String{
				stringvalidator.OneOf("SVA_KVM_X86", "KVM_X86", "KVM_PPC"),
			},
			Description: "Hardware architecture of this compute instance.",
		},
		"cpu": schema.Int64Attribute{
			Required: true,
			Validators: []validator.Int64{
				int64validator.Between(1, constants.MaxCpusPerCompute),
			},
			Description: "Number of CPUs to allocate to this compute instance.",
		},
		"ram": schema.Int64Attribute{
			Required: true,
			Validators: []validator.Int64{
				validate.DivisibleBy(constants.DivisibleByRAM),
			},
			Description: "Amount of RAM in MB to allocate to this compute instance.",
		},
		// optional attributes
		"image_id": schema.Int64Attribute{
			Optional:    true,
			Description: "ID of the OS image to base this compute instance on.",
		},
		"without_boot_disk": schema.BoolAttribute{ //default false
			Optional:    true,
			Description: "If True, the imageId, bootDisk, sepId, pool parameters are ignored and the compute is created without a boot disk in the stopped state.",
		},
		"boot_disk_size": schema.Int64Attribute{
			Optional:    true,
			Computed:    true,
			Description: "This compute instance boot disk size in GB. Make sure it is large enough to accommodate selected OS image.",
		},
		"affinity_label": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "Set affinity label for compute",
		},
		"affinity_rules": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"topology": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("node", "compute"),
						},
						Description: "compute or node, for whom rule applies",
					},
					"policy": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("RECOMMENDED", "REQUIRED"),
						},
						Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule",
					},
					"mode": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("EQ", "NE", "ANY"),
						},
						Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'",
					},
					"key": schema.StringAttribute{
						Required:    true,
						Description: "key that are taken into account when analyzing this rule will be identified",
					},
					"value": schema.StringAttribute{
						Optional:    true,
						Description: "value that must match the key to be taken into account when analyzing this rule",
					},
				},
			},
		},
		"anti_affinity_rules": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"topology": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("node", "compute"),
						},
						Description: "compute or node, for whom rule applies",
					},
					"policy": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("RECOMMENDED", "REQUIRED"),
						},
						Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule",
					},
					"mode": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("EQ", "NE", "ANY"),
						},
						Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'",
					},
					"key": schema.StringAttribute{
						Required:    true,
						Description: "key that are taken into account when analyzing this rule will be identified",
					},
					"value": schema.StringAttribute{
						Optional:    true,
						Description: "value that must match the key to be taken into account when analyzing this rule",
					},
				},
			},
		},
		"custom_fields": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "custom fields for Compute. Must be dict",
		},
		"stateless": schema.BoolAttribute{ //default false
			Optional:    true,
			Description: "Compute will be stateless (SVA_KVM_X86) if set to True",
		},
		"sep_id": schema.Int64Attribute{
			Optional:    true,
			Computed:    true,
			Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.",
		},
		"pool": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.",
		},
		"extra_disks": schema.SetAttribute{
			Optional: true,
			//Computed: true,
			Validators: []validator.Set{
				setvalidator.SizeAtMost(constants.MaxExtraDisksPerCompute),
			},
			ElementType: types.Int64Type,
			Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.",
		},
		"network": schema.SetNestedAttribute{
			Optional: true,
			Computed: true,
			Validators: []validator.Set{
				setvalidator.SizeBetween(1, constants.MaxNetworksPerCompute),
			},
			Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"net_type": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("EXTNET", "VINS", "VFNIC"),
						},
						Description: "Type of the network for this connection, either EXTNET or VINS.",
					},
					"net_id": schema.Int64Attribute{
						Required:    true,
						Description: "ID of the network for this connection.",
					},
					"ip_address": schema.StringAttribute{
						Optional:    true,
						Computed:    true,
						Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
					},
					"mac": schema.StringAttribute{
						Computed:    true,
						Description: "MAC address associated with this connection. MAC address is assigned automatically.",
					},
				},
			},
		},
		"tags": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"key": schema.StringAttribute{
						Required: true,
					},
					"value": schema.StringAttribute{
						Required: true,
					},
				},
			},
		},
		"port_forwarding": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"public_port_start": schema.Int64Attribute{
						Required: true,
					},
					"public_port_end": schema.Int64Attribute{
						Optional: true,
						//Default: -1,
					},
					"local_port": schema.Int64Attribute{
						Optional: true,
					},
					"proto": schema.StringAttribute{
						Required: true,
						Validators: []validator.String{
							stringvalidator.OneOf("tcp", "udp"),
						},
					},
				},
			},
		},
		"user_access": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"username": schema.StringAttribute{
						Required: true,
					},
					"access_type": schema.StringAttribute{
						Required: true,
					},
				},
			},
		},
		"snapshot": schema.SetNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"label": schema.StringAttribute{
						Required: true,
					},
				},
			},
		},
		"rollback": schema.SingleNestedAttribute{
			Optional: true,
			Attributes: map[string]schema.Attribute{
				"label": schema.StringAttribute{
					Required: true,
				},
			},
		},
		"cd": schema.SingleNestedAttribute{
			Optional: true,
			Attributes: map[string]schema.Attribute{
				"cdrom_id": schema.Int64Attribute{
					Required: true,
				},
			},
		},
		"pin_to_stack": schema.BoolAttribute{
			Optional: true,
		},
		"description": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "Optional text description of this compute instance.",
		},
		"cloud_init": schema.StringAttribute{
			Optional:    true,
			Description: "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.",
		},
		"enabled": schema.BoolAttribute{
			Optional:    true,
			Description: "If true - enable compute, else - disable",
		},
		"pause": schema.BoolAttribute{
			Optional: true,
			//Default: false,
		},
		"reset": schema.BoolAttribute{
			Optional: true,
			//Default: false,
		},
		"restore": schema.BoolAttribute{
			Optional: true,
			//Default: true,
		},
		"auto_start": schema.BoolAttribute{
			Optional: true,
			//Default: false,
			Description: "Flag for redeploy compute",
		},
		"force_stop": schema.BoolAttribute{
			Optional: true,
			//Default: false,
			Description: "Flag for redeploy compute",
		},
		"force_resize": schema.BoolAttribute{
			Optional: true,
			//Default: false,
			Description: "Flag for resize compute",
		},
		"data_disks": schema.StringAttribute{
			Optional: true,
			Validators: []validator.String{
				stringvalidator.OneOf("KEEP", "DETACH", "DESTROY"),
			},
			//Default: "DETACH",
			Description: "Flag for redeploy compute",
		},
		"started": schema.BoolAttribute{
			Optional: true,
			//Default: true,
			Description: "Is compute started.",
		},
		"detach_disks": schema.BoolAttribute{
			Optional: true,
			//Default: true,
		},
		"permanently": schema.BoolAttribute{
			Optional: true,
			//Default: true,
		},
		"is": schema.StringAttribute{
			Optional:    true,
			Description: "system name",
		},
		"ipa_type": schema.StringAttribute{
			Optional:    true,
			Description: "compute purpose",
		},
		"numa_affinity": schema.StringAttribute{
			Optional: true,
			Validators: []validator.String{
				stringvalidator.OneOf("none", "strict", "loose"),
			},
			//Default: "none",
			Description: "Rule for VM placement with NUMA affinity.",
		},
		"cpu_pin": schema.BoolAttribute{
			Optional: true,
			//Default: false,
			Description: "Run VM on dedicated CPUs. To use this feature, the system must be pre-configured by allocating CPUs on the physical node.",
		},
		"hp_backed": schema.BoolAttribute{
			Optional: true,
			//Default: false,
			Description: "Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node.",
		},
		// computed attributes
		"compute_id": schema.Int64Attribute{
			Computed: true,
		},
		"boot_disk": schema.SingleNestedAttribute{
			Computed:   true,
			Attributes: MakeSchemaResourceComputeDisks(),
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"account_name": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"account_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
				"compute_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
				"rg_acl": schema.ListNestedAttribute{
					Computed: true,
					NestedObject: schema.NestedAttributeObject{
						Attributes: map[string]schema.Attribute{
							"explicit": schema.BoolAttribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"right": schema.StringAttribute{
								Computed: true,
							},
							"status": schema.StringAttribute{
								Computed: true,
							},
							"type": schema.StringAttribute{
								Computed: true,
							},
							"user_group_id": schema.StringAttribute{
								Computed: true,
							},
						},
					},
				},
			},
		},
		"affinity_weight": schema.Int64Attribute{
			Computed: true,
		},
		"arch": schema.StringAttribute{
			Computed: true,
		},
		"boot_order": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		"boot_disk_id": schema.Int64Attribute{
			Computed: true,
		},
		"cd_image_id": schema.Int64Attribute{
			Computed: true,
		},
		"clone_reference": schema.Int64Attribute{
			Computed: true,
		},
		"clones": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"computeci_id": schema.Int64Attribute{
			Computed: true,
		},
		"created_by": schema.StringAttribute{
			Computed: true,
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_by": schema.StringAttribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"devices": schema.StringAttribute{
			Computed: true,
		},
		"disks": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: MakeSchemaResourceComputeDisks(),
			},
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"guid": schema.Int64Attribute{
			Computed: true,
		},
		"image_name": schema.StringAttribute{
			Computed: true,
		},
		"interfaces": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"conn_id": schema.Int64Attribute{
						Computed: true,
					},
					"conn_type": schema.StringAttribute{
						Computed: true,
					},
					// NOTE(review): sibling datasource schemas name this
					// attribute "def_gw"; renaming here would break existing
					// state/models, so it is kept as-is — confirm intent.
					"get_gw": schema.StringAttribute{
						Computed: true,
					},
					"enabled": schema.BoolAttribute{
						Computed: true,
					},
					"flip_group_id": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"ip_address": schema.StringAttribute{
						Computed: true,
					},
					"listen_ssh": schema.BoolAttribute{
						Computed: true,
					},
					"mac": schema.StringAttribute{
						Computed: true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"net_id": schema.Int64Attribute{
						Computed: true,
					},
					"netmask": schema.Int64Attribute{
						Computed: true,
					},
					"net_type": schema.StringAttribute{
						Computed: true,
					},
					"node_id": schema.Int64Attribute{
						Computed: true,
					},
					"pci_slot": schema.Int64Attribute{
						Computed: true,
					},
					"qos": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"e_rate": schema.Int64Attribute{
								Computed: true,
							},
							"guid": schema.StringAttribute{
								Computed: true,
							},
							"in_burst": schema.Int64Attribute{
								Computed: true,
							},
							"in_rate": schema.Int64Attribute{
								Computed: true,
							},
						},
					},
					"target": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"vnfs": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
				},
			},
		},
		"lock_status": schema.StringAttribute{
			Computed: true,
		},
		"manager_id": schema.Int64Attribute{
			Computed: true,
		},
		"manager_type": schema.StringAttribute{
			Computed: true,
		},
		"migrationjob": schema.Int64Attribute{
			Computed: true,
		},
		"milestones": schema.Int64Attribute{
			Computed: true,
		},
		"need_reboot": schema.BoolAttribute{
			Computed: true,
		},
		"numa_node_id": schema.Int64Attribute{
			Computed: true,
		},
		"natable_vins_id": schema.Int64Attribute{
			Computed: true,
		},
		"natable_vins_ip": schema.StringAttribute{
			Computed: true,
		},
		"natable_vins_name": schema.StringAttribute{
			Computed: true,
		},
		"natable_vins_network": schema.StringAttribute{
			Computed: true,
		},
		"natable_vins_network_name": schema.StringAttribute{
			Computed: true,
		},
		"os_users": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"login": schema.StringAttribute{
						Computed: true,
					},
					"password": schema.StringAttribute{
						Computed: true,
					},
					"public_key": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"pinned": schema.BoolAttribute{
			Computed: true,
		},
		"reference_id": schema.StringAttribute{
			Computed: true,
		},
		"registered": schema.BoolAttribute{
			Computed: true,
		},
		"res_name": schema.StringAttribute{
			Computed: true,
		},
		"reserved_node_cpus": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"rg_name": schema.StringAttribute{
			Computed: true,
		},
		"snap_sets": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"disks": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"stateless_sep_id": schema.Int64Attribute{
			Computed: true,
		},
		"stateless_sep_type": schema.StringAttribute{
			Computed: true,
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"updated_by": schema.StringAttribute{
			Computed: true,
		},
		"updated_time": schema.Int64Attribute{
			Computed: true,
		},
		"user_data": schema.StringAttribute{
			Computed: true,
		},
		"user_managed": schema.BoolAttribute{
			Computed: true,
		},
		"vgpus": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"virtual_image_id": schema.Int64Attribute{
			Computed: true,
		},
		"virtual_image_name": schema.StringAttribute{
			Computed: true,
		},
	}
}
// MakeSchemaResourceComputeDisks returns the Terraform schema describing one
// disk attached to a compute resource. Every attribute is Computed: disk data
// is read back from the platform, never configured directly by the user.
func MakeSchemaResourceComputeDisks() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		"ckey": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.StringAttribute{
			Computed: true,
		},
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"boot_partition": schema.Int64Attribute{
			Computed: true,
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"desc": schema.StringAttribute{
			Computed: true,
		},
		"destruction_time": schema.Int64Attribute{
			Computed: true,
		},
		"disk_path": schema.StringAttribute{
			Computed: true,
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"guid": schema.Int64Attribute{
			Computed: true,
		},
		"disk_id": schema.Int64Attribute{
			Computed: true,
		},
		"image_id": schema.Int64Attribute{
			Computed: true,
		},
		"images": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		// iotune mirrors the platform's per-disk I/O limit settings.
		"iotune": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"size_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"iqn": schema.StringAttribute{
			Computed: true,
		},
		"login": schema.StringAttribute{
			Computed: true,
		},
		"milestones": schema.Int64Attribute{
			Computed: true,
		},
		"name": schema.StringAttribute{
			Computed: true,
		},
		"order": schema.Int64Attribute{
			Computed: true,
		},
		"params": schema.StringAttribute{
			Computed: true,
		},
		"parent_id": schema.Int64Attribute{
			Computed: true,
		},
		"passwd": schema.StringAttribute{
			Computed: true,
		},
		"pci_slot": schema.Int64Attribute{
			Computed: true,
		},
		"pool": schema.StringAttribute{
			Computed: true,
		},
		"present_to": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"purge_time": schema.Int64Attribute{
			Computed: true,
		},
		// replication describes the disk's replica pairing, if any.
		"replication": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"disk_id": schema.Int64Attribute{
					Computed: true,
				},
				"pool_id": schema.StringAttribute{
					Computed: true,
				},
				"role": schema.StringAttribute{
					Computed: true,
				},
				"self_volume_id": schema.StringAttribute{
					Computed: true,
				},
				"storage_id": schema.StringAttribute{
					Computed: true,
				},
				"volume_id": schema.StringAttribute{
					Computed: true,
				},
			},
		},
		"reality_device_number": schema.Int64Attribute{
			Computed: true,
		},
		"reference_id": schema.StringAttribute{
			Computed: true,
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"role": schema.StringAttribute{
			Computed: true,
		},
		"sep_id": schema.Int64Attribute{
			Computed: true,
		},
		"shareable": schema.BoolAttribute{
			Computed: true,
		},
		"size_max": schema.Int64Attribute{
			Computed: true,
		},
		"size_used": schema.Float64Attribute{
			Computed: true,
		},
		// snapshots lists the point-in-time snapshots taken of this disk.
		"snapshots": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"reference_id": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"type": schema.StringAttribute{
			Computed: true,
		},
		"vmid": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,28 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeCheckPresence fetches the full compute record for the compute ID
// stored in the data-source state. The SDK lookup error is returned as-is.
func ComputeCheckPresence(ctx context.Context, state *models.RecordComputeModel, c *decort.DecortClient) (*compute.RecordCompute, error) {
	tflog.Info(ctx, "Get compute info")

	getReq := compute.GetRequest{ComputeID: uint64(state.ComputeId.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": getReq})

	record, err := c.CloudAPI().Compute().Get(ctx, getReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute info, successfully")
	return record, nil
}

View File

@@ -0,0 +1,28 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeGetAuditsCheckPresence fetches the short audit list for the compute
// ID stored in state and returns a pointer to the SDK result.
func ComputeGetAuditsCheckPresence(ctx context.Context, state *models.GetAuditsModel, c *decort.DecortClient) (*compute.ListShortAudits, error) {
	tflog.Info(ctx, "Get compute short audits info")

	auditsReq := compute.GetAuditsRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": auditsReq})

	audits, err := c.CloudAPI().Compute().GetAudits(ctx, auditsReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute short audits, successfully")
	return &audits, nil
}

View File

@@ -0,0 +1,28 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeAuditsCheckPresence fetches the full audit list for the compute ID
// stored in state and returns a pointer to the SDK result.
func ComputeAuditsCheckPresence(ctx context.Context, state *models.ListAuditsModel, c *decort.DecortClient) (*compute.ListAudits, error) {
	tflog.Info(ctx, "Get compute audits list info")

	auditsReq := compute.AuditsRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": auditsReq})

	audits, err := c.CloudAPI().Compute().Audits(ctx, auditsReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute audits list, successfully")
	return &audits, nil
}

View File

@@ -0,0 +1,32 @@
package utilities
import (
"context"
"strings"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeGetConsoleUrlCheckPresence fetches the console URL for the compute
// ID stored in state. The raw URL comes back JSON-quoted, so surrounding
// double quotes and escaping backslashes are stripped before returning.
func ComputeGetConsoleUrlCheckPresence(ctx context.Context, state *models.GetConsoleUrlModel, c *decort.DecortClient) (string, error) {
	tflog.Info(ctx, "Get compute console url")

	urlReq := compute.GetConsoleURLRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": urlReq})

	rawURL, err := c.CloudAPI().Compute().GetConsoleURL(ctx, urlReq)
	if err != nil {
		return "", err
	}

	// One pass removes both quote and backslash characters; for disjoint
	// single-character patterns this is equivalent to two ReplaceAll calls.
	cleaned := strings.NewReplacer("\"", "", "\\", "").Replace(rawURL)

	tflog.Info(ctx, "Getting compute console url, successfully")
	return cleaned, nil
}

View File

@@ -0,0 +1,29 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeGetLogCheckPresence fetches the contents of the log file at the path
// stored in state for the compute ID stored in state.
func ComputeGetLogCheckPresence(ctx context.Context, state *models.GetLogModel, c *decort.DecortClient) (string, error) {
	tflog.Info(ctx, "Get compute log")

	logReq := compute.GetLogRequest{
		ComputeID: uint64(state.ComputeID.ValueInt64()),
		Path:      state.Path.ValueString(),
	}
	tflog.Info(ctx, "Check req", map[string]any{"req": logReq})

	logText, err := c.CloudAPI().Compute().GetLog(ctx, logReq)
	if err != nil {
		return "", err
	}

	tflog.Info(ctx, "Getting compute log, successfully")
	return logText, nil
}

View File

@@ -0,0 +1,83 @@
package utilities
import (
"context"
"regexp"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeListCheckPresence builds a compute.ListRequest from the optional
// data-source filters held in state, fetches the compute list, and — when the
// ignore_k8s flag is set — drops computes whose names look like Kubernetes
// node names (see matchComputes).
func ComputeListCheckPresence(ctx context.Context, state *models.ListComputesModel, c *decort.DecortClient) (*compute.ListComputes, error) {
	tflog.Info(ctx, "Get compute list info")
	req := compute.ListRequest{}
	// Only filters explicitly set in the configuration are forwarded; null
	// values leave the corresponding request field at its zero value.
	if !state.ByID.IsNull() {
		req.ByID = uint64(state.ByID.ValueInt64())
	}
	if !state.Name.IsNull() {
		req.Name = state.Name.ValueString()
	}
	if !state.AccountID.IsNull() {
		req.AccountID = uint64(state.AccountID.ValueInt64())
	}
	if !state.RGName.IsNull() {
		req.RGName = state.RGName.ValueString()
	}
	if !state.RGID.IsNull() {
		req.RGID = uint64(state.RGID.ValueInt64())
	}
	if !state.TechStatus.IsNull() {
		req.TechStatus = state.TechStatus.ValueString()
	}
	if !state.Status.IsNull() {
		req.Status = state.Status.ValueString()
	}
	if !state.IPAddress.IsNull() {
		req.IPAddress = state.IPAddress.ValueString()
	}
	if !state.ExtNetName.IsNull() {
		req.ExtNetName = state.ExtNetName.ValueString()
	}
	if !state.ExtNetID.IsNull() {
		req.ExtNetID = uint64(state.ExtNetID.ValueInt64())
	}
	if !state.IncludeDeleted.IsNull() {
		req.IncludeDeleted = state.IncludeDeleted.ValueBool()
	}
	if !state.SortBy.IsNull() {
		req.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		req.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		req.Size = uint64(state.Size.ValueInt64())
	}
	tflog.Info(ctx, "Check req", map[string]any{"req": req})
	list, err := c.CloudAPI().Compute().List(ctx, req)
	if err != nil {
		return nil, err
	}
	// Post-filter on the client side: the API has no "ignore k8s" flag.
	if !state.IgnoreK8s.IsNull() && state.IgnoreK8s.ValueBool() {
		list = matchComputes(list)
	}
	tflog.Info(ctx, "Getting compute list info, successfully")
	return list, nil
}
// k8sNameRe matches compute names auto-generated for Kubernetes cluster nodes:
// three dash-separated "<letters><digits>" segments.
//
// Fix: the previous pattern used `\\d` inside a raw (backtick) string, which
// the regexp engine reads as an escaped literal backslash followed by 'd', so
// the pattern could never match a real compute name and the ignore_k8s filter
// was a no-op. Compiling once at package level via MustCompile also stops the
// compile error from being silently discarded on every call.
var k8sNameRe = regexp.MustCompile(`[a-zA-Z]+\d+-[a-zA-Z]+\d+-[a-zA-Z]+\d+`)

// matchComputes returns the subset of computeList whose names do NOT look
// like Kubernetes node names.
func matchComputes(computeList *compute.ListComputes) *compute.ListComputes {
	result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
		return !k8sNameRe.MatchString(ic.Name)
	})
	return &result
}

View File

@@ -0,0 +1,66 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeListDeletedCheckPresence builds a compute.ListDeletedRequest from
// the optional data-source filters held in state, fetches the list of deleted
// computes, and — when the ignore_k8s flag is set — drops computes whose
// names look like Kubernetes node names (see matchComputes).
func ComputeListDeletedCheckPresence(ctx context.Context, state *models.ListDeletedComputesModel, c *decort.DecortClient) (*compute.ListComputes, error) {
	tflog.Info(ctx, "Get compute list deleted info")
	req := compute.ListDeletedRequest{}
	// Only filters explicitly set in the configuration are forwarded; null
	// values leave the corresponding request field at its zero value.
	if !state.ByID.IsNull() {
		req.ByID = uint64(state.ByID.ValueInt64())
	}
	if !state.Name.IsNull() {
		req.Name = state.Name.ValueString()
	}
	if !state.AccountID.IsNull() {
		req.AccountID = uint64(state.AccountID.ValueInt64())
	}
	if !state.RGName.IsNull() {
		req.RGName = state.RGName.ValueString()
	}
	if !state.RGID.IsNull() {
		req.RGID = uint64(state.RGID.ValueInt64())
	}
	if !state.TechStatus.IsNull() {
		req.TechStatus = state.TechStatus.ValueString()
	}
	if !state.IPAddress.IsNull() {
		req.IPAddress = state.IPAddress.ValueString()
	}
	if !state.ExtNetName.IsNull() {
		req.ExtNetName = state.ExtNetName.ValueString()
	}
	if !state.ExtNetID.IsNull() {
		req.ExtNetID = uint64(state.ExtNetID.ValueInt64())
	}
	if !state.SortBy.IsNull() {
		req.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		req.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		req.Size = uint64(state.Size.ValueInt64())
	}
	tflog.Info(ctx, "Check req", map[string]any{"req": req})
	list, err := c.CloudAPI().Compute().ListDeleted(ctx, req)
	if err != nil {
		return nil, err
	}
	// Post-filter on the client side: the API has no "ignore k8s" flag.
	if !state.IgnoreK8s.IsNull() && state.IgnoreK8s.ValueBool() {
		list = matchComputes(list)
	}
	tflog.Info(ctx, "Getting compute list deleted info, successfully")
	return list, nil
}

View File

@@ -0,0 +1,50 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputePciDeviceListCheckPresence builds a PCI-device list request from the
// required compute ID plus the optional filters held in state, and returns
// the list fetched from the platform.
func ComputePciDeviceListCheckPresence(ctx context.Context, state *models.ListPCIDevicesModel, c *decort.DecortClient) (*compute.ListPCIDevices, error) {
	tflog.Info(ctx, "Get compute pci device list info")

	listReq := compute.ListPCIDeviceRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}

	// Optional filters: forward only what the configuration set explicitly.
	if !state.RGID.IsNull() {
		listReq.RGID = uint64(state.RGID.ValueInt64())
	}
	if !state.DevID.IsNull() {
		listReq.DevID = uint64(state.DevID.ValueInt64())
	}
	if !state.Name.IsNull() {
		listReq.Name = state.Name.ValueString()
	}
	if !state.Status.IsNull() {
		listReq.Status = state.Status.ValueString()
	}
	if !state.SortBy.IsNull() {
		listReq.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		listReq.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		listReq.Size = uint64(state.Size.ValueInt64())
	}

	tflog.Info(ctx, "Check req", map[string]any{"req": listReq})

	pciDevices, err := c.CloudAPI().Compute().ListPCIDevice(ctx, listReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute pci device list info, successfully")
	return pciDevices, nil
}

View File

@@ -0,0 +1,28 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputePFWListCheckPresence fetches the port-forwarding rule list for the
// compute ID stored in state.
func ComputePFWListCheckPresence(ctx context.Context, state *models.ListPFWsModel, c *decort.DecortClient) (*compute.ListPFWs, error) {
	tflog.Info(ctx, "Get compute pfw list info")

	listReq := compute.PFWListRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": listReq})

	pfws, err := c.CloudAPI().Compute().PFWList(ctx, listReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute pfw list info, successfully")
	return pfws, nil
}

View File

@@ -0,0 +1,31 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeSnapshotUsageCheckPresence fetches snapshot usage for the compute ID
// stored in state, optionally narrowed to a single snapshot label.
func ComputeSnapshotUsageCheckPresence(ctx context.Context, state *models.ListSnapShotsModel, c *decort.DecortClient) (*compute.ListUsageSnapshots, error) {
	tflog.Info(ctx, "Get compute snapshot usage info")

	usageReq := compute.SnapshotUsageRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	if !state.Label.IsNull() {
		usageReq.Label = state.Label.ValueString()
	}
	tflog.Info(ctx, "Check req", map[string]any{"req": usageReq})

	usage, err := c.CloudAPI().Compute().SnapshotUsage(ctx, usageReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute snapshot usage info, successfully")
	return &usage, nil
}

View File

@@ -0,0 +1,28 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeUserListCheckPresence fetches the user access list for the compute
// ID stored in state.
func ComputeUserListCheckPresence(ctx context.Context, state *models.ListUsersModel, c *decort.DecortClient) (*compute.ListUsers, error) {
	tflog.Info(ctx, "Get compute user list info")

	listReq := compute.UserListRequest{ComputeID: uint64(state.ComputeID.ValueInt64())}
	tflog.Info(ctx, "Check req", map[string]any{"req": listReq})

	users, err := c.CloudAPI().Compute().UserList(ctx, listReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute user list info, successfully")
	return users, nil
}

View File

@@ -0,0 +1,50 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
)
// ComputeVGPUListCheckPresence builds a vGPU list request from the required
// compute ID plus the optional filters held in state, and returns the list
// fetched from the platform.
func ComputeVGPUListCheckPresence(ctx context.Context, state *models.ListVGPUsModel, c *decort.DecortClient) (*compute.ListVGPUs, error) {
	tflog.Info(ctx, "Get compute vgpu list info")

	listReq := compute.ListVGPURequest{ComputeID: uint64(state.ComputeID.ValueInt64())}

	// Optional filters: forward only what the configuration set explicitly.
	if !state.GPUID.IsNull() {
		listReq.GPUID = uint64(state.GPUID.ValueInt64())
	}
	if !state.Type.IsNull() {
		listReq.Type = state.Type.ValueString()
	}
	if !state.Status.IsNull() {
		listReq.Status = state.Status.ValueString()
	}
	if !state.Page.IsNull() {
		listReq.Page = uint64(state.Page.ValueInt64())
	}
	if !state.SortBy.IsNull() {
		listReq.SortBy = state.SortBy.ValueString()
	}
	if !state.Size.IsNull() {
		listReq.Size = uint64(state.Size.ValueInt64())
	}
	if !state.IncludeDeleted.IsNull() {
		listReq.IncludeDeleted = state.IncludeDeleted.ValueBool()
	}

	tflog.Info(ctx, "Check req", map[string]any{"req": listReq})

	vgpus, err := c.CloudAPI().Compute().ListVGPU(ctx, listReq)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting compute vgpu list info, successfully")
	return vgpus, nil
}

View File

@@ -0,0 +1,807 @@
package utilities
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)
// ComputeResourceCheckPresence parses the compute ID stored (as a string) in
// the resource plan and fetches the corresponding compute record from the
// platform. Returns the record, or diagnostics describing the parse or
// lookup failure.
func ComputeResourceCheckPresence(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) (*compute.RecordCompute, diag.Diagnostics) {
	tflog.Info(ctx, fmt.Sprintf("ComputeResourceCheckPresence: Get info about compute with ID - %v", plan.ID.ValueString()))
	diags := diag.Diagnostics{}
	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		// Fixed message: previously read "Cannot parsed ID lb from state" —
		// ungrammatical and referring to a load balancer, not a compute.
		diags.AddError("ComputeResourceCheckPresence: Cannot parse compute ID from state", err.Error())
		return nil, diags
	}
	computeItem, err := c.CloudAPI().Compute().Get(ctx, compute.GetRequest{ComputeID: computeId})
	if err != nil {
		diags.AddError(fmt.Sprintf("ComputeResourceCheckPresence: Cannot get info about compute with ID %v", computeId), err.Error())
		return nil, diags
	}
	return computeItem, nil
}
// ComputeResourceCheckDetachDisks inspects the attached disks of the fetched
// compute record against the plan's boot disk ID and extra_disks set.
// It returns false when the first non-boot disk it encounters is present in
// extra_disks, and true otherwise (including when the compute has no disks).
func ComputeResourceCheckDetachDisks(state *models.ResourceComputeModel, recordCompute *compute.RecordCompute) bool {
	bootDiskId := uint64(state.BootDiskId.ValueInt64())
	extraDiskList := state.ExtraDisks.Elements()
	for _, disk := range recordCompute.Disks {
		// The boot disk is never a candidate for detaching.
		if disk.ID == bootDiskId {
			continue
		}
		for _, extraDisk := range extraDiskList {
			if uint64(extraDisk.(types.Int64).ValueInt64()) == disk.ID {
				return false
			}
		}
		// NOTE(review): this returns after examining only the FIRST non-boot
		// disk; any later disks are never checked. If the intent is "true when
		// ANY disk is neither the boot disk nor an extra disk", this early
		// return is a bug — confirm against the callers before changing.
		return true
	}
	return true
}
// ComputeResourceBootDiskCheckPresence fetches the compute record and returns
// the disk named "bootdisk". If no such disk exists, a zero-valued (non-nil)
// disk is returned, matching the platform's naming convention for boot disks.
func ComputeResourceBootDiskCheckPresence(ctx context.Context, state *models.ResourceComputeModel, c *decort.DecortClient) (*compute.ItemComputeDisk, diag.Diagnostics) {
	tflog.Info(ctx, "ComputeResourceBootDiskCheckPresence: Get info about boot disk")

	record, diags := ComputeResourceCheckPresence(ctx, state, c)
	if diags.HasError() {
		return nil, diags
	}

	var bootDisk compute.ItemComputeDisk
	for _, d := range record.Disks {
		if d.Name == "bootdisk" {
			bootDisk = d
			break
		}
	}
	return &bootDisk, nil
}
// CreateResourceCompute creates a new compute from the resource plan. Two
// parallel create requests (x86 and PowerPC) are populated from the shared
// plan fields; which one is sent depends on plan.Driver ("KVM_PPC" selects
// the PowerPC API, anything else the x86 API). The compute is created with
// Start: false — starting is handled separately. Returns the new compute ID,
// or diagnostics describing the failure.
//
// Fixes relative to the original: no-arg fmt.Sprintf calls (flagged by go
// vet) replaced with plain strings, and the "KVM VP …" typos in the error
// messages corrected to "KVM VM …".
func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) (uint64, diag.Diagnostics) {
	tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: Start create ResourceCompute: name %s", plan.Name.ValueString()))
	diags := diag.Diagnostics{}
	createReqX86 := kvmx86.CreateRequest{Start: false}
	createReqPPC := kvmppc.CreateRequest{Start: false}
	// Optional fields shared by both architectures.
	if !plan.Description.IsUnknown() {
		createReqPPC.Description = plan.Description.ValueString()
		createReqX86.Description = plan.Description.ValueString()
	}
	if !plan.SepId.IsUnknown() {
		createReqPPC.SEPID = uint64(plan.SepId.ValueInt64())
		createReqX86.SepID = uint64(plan.SepId.ValueInt64())
	}
	if !plan.Pool.IsUnknown() {
		createReqPPC.Pool = plan.Pool.ValueString()
		createReqX86.Pool = plan.Pool.ValueString()
	}
	if !plan.IpaType.IsNull() {
		createReqPPC.IPAType = plan.IpaType.ValueString()
		createReqX86.IPAType = plan.IpaType.ValueString()
	}
	if !plan.BootDiskSize.IsNull() {
		createReqPPC.BootDisk = uint64(plan.BootDiskSize.ValueInt64())
		createReqX86.BootDisk = uint64(plan.BootDiskSize.ValueInt64())
	}
	if !plan.IS.IsNull() {
		createReqPPC.IS = plan.IS.ValueString()
		createReqX86.IS = plan.IS.ValueString()
	}
	// Network interfaces for the x86 request.
	createReqX86.Interfaces = make([]kvmx86.Interface, 0)
	if !plan.Network.IsNull() {
		networkList := plan.Network.Elements()
		interfaces := make([]kvmx86.Interface, 0)
		for _, elem := range networkList {
			objVal := elem.(types.Object)
			elemMap := objVal.Attributes()
			reqInterface := kvmx86.Interface{
				NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()),
				NetID:   uint64(elemMap["net_id"].(types.Int64).ValueInt64()),
			}
			ipaddr, ipSet := elemMap["ip_address"]
			if ipSet {
				reqInterface.IPAddr = ipaddr.(types.String).ValueString()
			}
			interfaces = append(interfaces, reqInterface)
		}
		createReqX86.Interfaces = interfaces
	}
	// Network interfaces for the PowerPC request (same plan data, SDK type
	// differs per architecture).
	createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
	if !plan.Network.IsNull() {
		networkList := plan.Network.Elements()
		interfaces := make([]kvmppc.Interface, 0)
		for _, elem := range networkList {
			objVal := elem.(types.Object)
			elemMap := objVal.Attributes()
			reqInterface := kvmppc.Interface{
				NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()),
				NetID:   uint64(elemMap["net_id"].(types.Int64).ValueInt64()),
			}
			ipaddr, ipSet := elemMap["ip_address"]
			if ipSet {
				reqInterface.IPAddr = ipaddr.(types.String).ValueString()
			}
			interfaces = append(interfaces, reqInterface)
		}
		createReqPPC.Interfaces = interfaces
	}
	// Cloud-init user data. "applied" is the post-creation marker value and
	// must not be re-sent.
	if !plan.CloudInit.IsNull() {
		userData := plan.CloudInit.ValueString()
		if userData != "" && userData != "applied" {
			createReqPPC.Userdata = strings.TrimSpace(userData)
			createReqX86.Userdata = strings.TrimSpace(userData)
		}
	}
	driver := strings.ToUpper(plan.Driver.ValueString())
	if driver == "KVM_PPC" {
		createReqPPC.RGID = uint64(plan.RGID.ValueInt64())
		createReqPPC.Name = plan.Name.ValueString()
		createReqPPC.CPU = uint64(plan.CPU.ValueInt64())
		createReqPPC.RAM = uint64(plan.RAM.ValueInt64())
		createReqPPC.ImageID = uint64(plan.ImageID.ValueInt64())
		tflog.Info(ctx, "CreateResourceCompute: creating Compute of type KVM VM PowerPC")
		id, err := c.CloudAPI().KVMPPC().Create(ctx, createReqPPC)
		if err != nil {
			diags.AddError("CreateResourceCompute: unable to create KVM VM PowerPC", err.Error())
			return 0, diags
		}
		return id, diags
	} else {
		createReqX86.RGID = uint64(plan.RGID.ValueInt64())
		createReqX86.Name = plan.Name.ValueString()
		createReqX86.CPU = uint64(plan.CPU.ValueInt64())
		createReqX86.RAM = uint64(plan.RAM.ValueInt64())
		createReqX86.Driver = driver
		if !plan.ImageID.IsNull() {
			createReqX86.ImageID = uint64(plan.ImageID.ValueInt64())
		}
		if !plan.WithoutBootDisk.IsNull() {
			createReqX86.WithoutBootDisk = plan.WithoutBootDisk.ValueBool()
		}
		if !plan.CustomFields.IsUnknown() { //CustomFields optional && computed
			// Strip escaping and layout characters so the platform receives
			// compact JSON.
			val := plan.CustomFields.ValueString()
			val = strings.ReplaceAll(val, "\\", "")
			val = strings.ReplaceAll(val, "\n", "")
			val = strings.ReplaceAll(val, "\t", "")
			val = strings.TrimSpace(val)
			createReqX86.CustomFields = val
		}
		if !plan.NumaAffinity.IsNull() {
			createReqX86.NumaAffinity = strings.ToLower(plan.NumaAffinity.ValueString())
		}
		if !plan.CPUPin.IsNull() && plan.CPUPin.ValueBool() {
			createReqX86.CPUPin = true
		}
		if !plan.HPBacked.IsNull() && plan.HPBacked.ValueBool() {
			createReqX86.HPBacked = true
		}
		tflog.Info(ctx, "CreateResourceCompute: creating Compute of type KVM VM x86")
		id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
		if err != nil {
			diags.AddError("CreateResourceCompute: unable to create KVM VM x86", err.Error())
			return 0, diags
		}
		return id, diags
	}
}
// CleanupResourceCompute permanently deletes a compute (detaching its disks)
// after a failed creation. Deletion errors are only logged: the caller is
// already handling the original failure.
func CleanupResourceCompute(ctx context.Context, computeId uint64, c *decort.DecortClient) {
	tflog.Info(ctx, fmt.Sprintf("CleanupResourceCompute: Start delete ResourceCompute: id %d", computeId))

	deleteReq := compute.DeleteRequest{
		ComputeID:   computeId,
		Permanently: true,
		DetachDisks: true,
	}
	_, err := c.CloudAPI().Compute().Delete(ctx, deleteReq)
	if err != nil {
		tflog.Error(ctx, fmt.Sprintf("CleanupResourceCompute: could not delete compute after failed creation: %v", err))
	}
}
// ComputeResourceExtraDiskCreate attaches every disk listed in the plan's
// extra_disks set to the compute. Attach failures are collected into the
// returned diagnostics; the loop keeps going so every disk is attempted.
func ComputeResourceExtraDiskCreate(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeExtraDiskCreate: Start added extra disk(s) from compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}

	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parsed ID compute from plan", err.Error())
		return diags
	}

	for _, elem := range plan.ExtraDisks.Elements() {
		diskId := elem.(types.Int64).ValueInt64()
		attachReq := compute.DiskAttachRequest{
			ComputeID: computeId,
			DiskID:    uint64(diskId),
		}
		tflog.Info(ctx, "ComputeExtraDiskCreate: Start attach disk to compute with ID", map[string]any{"compute_id": plan.ID.ValueString(), "disk_id": diskId})
		if _, err = c.CloudAPI().Compute().DiskAttach(ctx, attachReq); err != nil {
			diags.AddError(fmt.Sprintf("Cannot attach disk with ID - %d", diskId), err.Error())
		}
	}
	return diags
}
// ComputeResourceEnableDisable enables or disables the compute according to
// plan.Enabled (a null value defaults to enable). API failures are reported
// as warnings, not errors, so the surrounding operation can continue.
func ComputeResourceEnableDisable(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeEnableDisable: Start enabled/disabled compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}

	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeEnableDisable: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	// Null means "not configured": default to enabled.
	enable := true
	if !plan.Enabled.IsNull() {
		enable = plan.Enabled.ValueBool()
	}
	tflog.Info(ctx, "ComputeEnableDisable: compute to be enabled/disabled", map[string]any{
		"compute_id": computeId,
		"enable":     enable})

	if enable {
		if _, err = c.CloudAPI().Compute().Enable(ctx, compute.EnableRequest{ComputeID: computeId}); err != nil {
			diags.AddWarning("ComputeEnableDisable: cannot enable compute", err.Error())
			return diags
		}
	} else {
		if _, err = c.CloudAPI().Compute().Disable(ctx, compute.DisableRequest{ComputeID: computeId}); err != nil {
			diags.AddWarning("ComputeEnableDisable: cannot disable compute", err.Error())
			return diags
		}
	}

	tflog.Info(ctx, "ComputeEnableDisable: compute is successfully enabled/disabled", map[string]any{"compute_id": computeId, "enable": enable})
	return nil
}
// ComputeResourceStartStop starts or stops the compute according to
// plan.Started (a null value defaults to start). API failures are reported
// as warnings, not errors, so the surrounding operation can continue.
func ComputeResourceStartStop(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeStartStop: Start started/stopped compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}

	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeStartStop: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	// Null means "not configured": default to started.
	started := true
	if !plan.Started.IsNull() {
		started = plan.Started.ValueBool()
	}
	tflog.Info(ctx, "ComputeStartStop: compute to be started/stopped", map[string]any{
		"compute_id": computeId,
		"started":    started})

	if started {
		if _, err = c.CloudAPI().Compute().Start(ctx, compute.StartRequest{ComputeID: computeId}); err != nil {
			diags.AddWarning("ComputeStartStop: cannot start compute", err.Error())
			return diags
		}
	} else {
		if _, err = c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId}); err != nil {
			diags.AddWarning("ComputeStartStop: cannot stop compute", err.Error())
			return diags
		}
	}

	tflog.Info(ctx, "ComputeStartStop: compute is successfully started/stopped", map[string]any{"compute_id": computeId, "started": started})
	return nil
}
// ComputeResourceAffinityLabel sets the affinity label from the plan on the
// compute. API failures are reported as warnings, not errors.
func ComputeResourceAffinityLabel(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeAffinityLabel: Start added affinity label to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}

	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeAffinityLabel: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	setReq := compute.AffinityLabelSetRequest{
		ComputeID:     computeId,
		AffinityLabel: plan.AffinityLabel.ValueString(),
	}
	if _, err = c.CloudAPI().Compute().AffinityLabelSet(ctx, setReq); err != nil {
		diags.AddWarning(
			"ComputeAffinityLabel: Unable to AffinityLabelAdd for Compute",
			err.Error(),
		)
		return diags
	}

	tflog.Info(ctx, "ComputeAffinityLabel: affinity label is successfully added", map[string]any{"compute_id": computeId})
	return nil
}
// ComputeResourceAffinityRules adds every affinity rule from the plan to the
// compute, one AffinityRuleAdd call per rule. Decoding problems and API
// failures are reported as warnings; the first API failure aborts the loop.
func ComputeResourceAffinityRules(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeAffinityRules: Start added affinity rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}
	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeAffinityRules: cannot parsed ID compute from plan", err.Error())
		return diags
	}
	// Decode the Terraform list into typed rule items; decoding diagnostics
	// replace the local diags value.
	itemsAffinityRules := make([]models.ItemResourceRulesModel, 0, len(plan.AffinityRules.Elements()))
	diags = plan.AffinityRules.ElementsAs(ctx, &itemsAffinityRules, false)
	if diags.HasError() {
		diags.AddWarning(
			fmt.Sprintf("ComputeAffinityRules: Unable to set affinity rules for compute %d", computeId),
			"cannot populate itemsAffinityRules with plan.AffinityRules List elements",
		)
		return diags
	}
	for _, itemAffinityRules := range itemsAffinityRules {
		// API expects a fixed casing: topology lowercase, policy/mode uppercase.
		req := compute.AffinityRuleAddRequest{
			ComputeID: computeId,
			Topology:  strings.ToLower(itemAffinityRules.Topology.ValueString()),
			Policy:    strings.ToUpper(itemAffinityRules.Policy.ValueString()),
			Mode:      strings.ToUpper(itemAffinityRules.Mode.ValueString()),
			Key:       itemAffinityRules.Key.ValueString(),
			Value:     itemAffinityRules.Value.ValueString(),
		}
		tflog.Info(ctx, "ComputeAffinityRules: before calling CloudAPI().Compute().AffinityRuleAdd", map[string]any{"compute_id": computeId, "req": req})
		res, err := c.CloudAPI().Compute().AffinityRuleAdd(ctx, req)
		tflog.Info(ctx, "ComputeAffinityRules: response from CloudAPI().Compute().AffinityRuleAdd", map[string]any{"compute_id": computeId, "response": res})
		if err != nil {
			diags.AddWarning(
				"ComputeAffinityRules: Unable to AffinityRuleAdd for Compute",
				err.Error(),
			)
			return diags
		}
	}
	// Surface any warnings that ElementsAs produced without errors.
	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeAffinityRules: affinity rules is successfully added", map[string]any{"compute_id": computeId})
	return nil
}
// ComputeResourceAntiAffinityRules adds every anti-affinity rule listed in the
// plan to the compute identified by plan.ID via the cloud API.
//
// A malformed compute ID is reported as an error; a failed API call is
// reported as a warning and stops processing of the remaining rules.
// Returns nil when all rules were added successfully.
func ComputeResourceAntiAffinityRules(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeAntiAffinityRules: Start added anti affinity rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeAntiAffinityRules: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	// Materialize the planned rules out of the framework list value.
	rules := make([]models.ItemResourceRulesModel, 0, len(plan.AntiAffinityRules.Elements()))
	diags = plan.AntiAffinityRules.ElementsAs(ctx, &rules, false)
	if diags.HasError() {
		diags.AddWarning(
			fmt.Sprintf("ComputeAntiAffinityRules: Unable to set anti affinity rules for compute %d", computeID),
			"cannot populate itemsAntiAffinityRules with plan.AntiAffinityRules List elements",
		)
		return diags
	}

	for _, rule := range rules {
		req := compute.AntiAffinityRuleAddRequest{
			ComputeID: computeID,
			Topology:  strings.ToLower(rule.Topology.ValueString()),
			Policy:    strings.ToUpper(rule.Policy.ValueString()),
			Mode:      strings.ToUpper(rule.Mode.ValueString()),
			Key:       rule.Key.ValueString(),
			Value:     rule.Value.ValueString(),
		}
		tflog.Info(ctx, "ComputeAntiAffinityRules: before calling CloudAPI().Compute().AntiAffinityRuleAdd", map[string]any{"compute_id": computeID, "req": req})
		res, err := c.CloudAPI().Compute().AntiAffinityRuleAdd(ctx, req)
		tflog.Info(ctx, "ComputeAntiAffinityRules: response from CloudAPI().Compute().AntiAffinityRuleAdd", map[string]any{"compute_id": computeID, "response": res})
		if err != nil {
			diags.AddWarning(
				"ComputeAntiAffinityRules: Unable to AntiAffinityRuleAdd for Compute",
				err.Error(),
			)
			return diags
		}
	}

	// Kept for parity with the original flow; warnings above return immediately.
	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeAntiAffinityRules: anti affinity rules is successfully added", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeResourceTags adds every tag from the plan to the compute identified
// by plan.ID.
//
// A malformed compute ID is reported as an error. Individual TagAdd failures
// are collected as warnings (processing continues) and returned after the
// loop. Returns nil when every tag was added successfully.
func ComputeResourceTags(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourceTags: Start added tags to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourceTags: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	for _, elem := range plan.Tags.Elements() {
		attrs := elem.(types.Object).Attributes()
		req := compute.TagAddRequest{
			ComputeID: computeID,
			Key:       attrs["key"].(types.String).ValueString(),
			Value:     attrs["value"].(types.String).ValueString(),
		}
		tflog.Info(ctx, "ComputeResourceTags: before calling CloudAPI().Compute().TagAdd", map[string]any{"compute_id": computeID, "req": req})
		res, err := c.CloudAPI().Compute().TagAdd(ctx, req)
		tflog.Info(ctx, "ComputeResourceTags: response from CloudAPI().Compute().TagAdd", map[string]any{"compute_id": computeID, "response": res})
		if err != nil {
			// Best-effort: record the failure and keep adding the remaining tags.
			diags.AddWarning(
				"ComputeResourceTags: Unable to add tags for Compute",
				err.Error(),
			)
		}
	}

	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeResourceTags: tags is successfully added", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeResourcePortForwarding creates the port forwarding rules from the
// plan for the compute identified by plan.ID.
//
// A malformed compute ID is reported as an error. Individual PFWAdd failures
// are collected as warnings (processing continues) and returned after the
// loop. Returns nil when every rule was added successfully.
func ComputeResourcePortForwarding(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourcePortForwarding: Start added port forwarding to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}
	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourcePortForwarding: cannot parsed ID compute from plan", err.Error())
		return diags
	}
	itemsPortForwarding := make([]models.ItemPortForfardingModel, 0, len(plan.PortForwarding.Elements()))
	diags = plan.PortForwarding.ElementsAs(ctx, &itemsPortForwarding, false)
	if diags.HasError() {
		diags.AddWarning(
			// Fixed typo in the warning title ("forwading" -> "forwarding").
			fmt.Sprintf("ComputeResourcePortForwarding: Unable to set port forwarding rules for compute %d", computeId),
			"cannot populate itemsPortForwarding with plan.PortForwarding List elements",
		)
		return diags
	}
	for _, itemPortForwarding := range itemsPortForwarding {
		req := compute.PFWAddRequest{
			ComputeID:       computeId,
			PublicPortStart: uint64(itemPortForwarding.PublicPortStart.ValueInt64()),
			Proto:           strings.ToLower(itemPortForwarding.Proto.ValueString()),
		}
		// NOTE(review): the original code re-assigned PublicPortStart a second
		// time under a non-zero guard (dead code, removed here). PublicPortEnd
		// is never sent to the API — presumably that guard was meant to
		// populate it from the plan; confirm against the PFWAddRequest schema.
		if itemPortForwarding.LocalPort.ValueInt64() != 0 {
			req.LocalBasePort = uint64(itemPortForwarding.LocalPort.ValueInt64())
		}
		tflog.Info(ctx, "ComputeResourcePortForwarding: before calling CloudAPI().Compute().PFWAdd", map[string]any{"compute_id": computeId, "req": req})
		res, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
		tflog.Info(ctx, "ComputeResourcePortForwarding: response from CloudAPI().Compute().PFWAdd", map[string]any{"compute_id": computeId, "response": res})
		if err != nil {
			// Best-effort: record the failure and keep adding the remaining rules.
			diags.AddWarning(
				"ComputeResourcePortForwarding: Unable to add pfw for Compute",
				err.Error(),
			)
		}
	}
	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeResourcePortForwarding: port forwarding rules is successfully added", map[string]any{"compute_id": computeId})
	return nil
}
// ComputeResourceUserAccess grants access to the compute identified by plan.ID
// for every user listed in the plan.
//
// A malformed compute ID is reported as an error. Individual UserGrant
// failures are collected as warnings (processing continues) and returned after
// the loop. Returns nil when every grant succeeded.
func ComputeResourceUserAccess(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourceUserAccess: Start added user(s) access to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourceUserAccess: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	for _, elem := range plan.UserAccess.Elements() {
		attrs := elem.(types.Object).Attributes()
		req := compute.UserGrantRequest{
			ComputeID:  computeID,
			Username:   attrs["username"].(types.String).ValueString(),
			AccessType: attrs["access_type"].(types.String).ValueString(),
		}
		tflog.Info(ctx, "ComputeResourceUserAccess: before calling CloudAPI().Compute().UserGrant", map[string]any{"compute_id": computeID, "req": req})
		res, err := c.CloudAPI().Compute().UserGrant(ctx, req)
		tflog.Info(ctx, "ComputeResourceUserAccess: response from CloudAPI().Compute().UserGrant", map[string]any{"compute_id": computeID, "response": res})
		if err != nil {
			// Best-effort: record the failure and keep granting the remaining users.
			diags.AddWarning(
				"ComputeResourceUserAccess: Unable to add user access for Compute",
				err.Error(),
			)
		}
	}

	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeResourceUserAccess: user(s) access is successfully added", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeResourceSnapshot creates a snapshot on the compute identified by
// plan.ID for every snapshot entry listed in the plan.
//
// A malformed compute ID is reported as an error. Individual SnapshotCreate
// failures are collected as warnings (processing continues) and returned after
// the loop. Returns nil when every snapshot was created successfully.
func ComputeResourceSnapshot(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourceSnapshot: Start added snapshot to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourceSnapshot: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	for _, elem := range plan.Snapshot.Elements() {
		attrs := elem.(types.Object).Attributes()
		req := compute.SnapshotCreateRequest{
			ComputeID: computeID,
			Label:     attrs["label"].(types.String).ValueString(),
		}
		tflog.Info(ctx, "ComputeResourceSnapshot: before calling CloudAPI().Compute().SnapshotCreate", map[string]any{"compute_id": computeID, "req": req})
		res, err := c.CloudAPI().Compute().SnapshotCreate(ctx, req)
		tflog.Info(ctx, "ComputeResourceSnapshot: response from CloudAPI().Compute().SnapshotCreate", map[string]any{"compute_id": computeID, "response": res})
		if err != nil {
			// Best-effort: record the failure and keep creating the remaining snapshots.
			diags.AddWarning(
				"ComputeResourceSnapshot: Unable to add snapshot for Compute",
				err.Error(),
			)
		}
	}

	if diags.WarningsCount() != 0 {
		return diags
	}
	tflog.Info(ctx, "ComputeResourceSnapshot: snapshot(s) is successfully added", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeResourceCDInsert inserts the CD-ROM image described by plan.CD
// (attribute "cdrom_id") into the compute identified by plan.ID.
//
// A malformed compute ID is reported as an error; a failed CDInsert call is
// reported as a warning. Returns nil on success.
func ComputeResourceCDInsert(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourceCDInsert: Start added cd to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	diags := diag.Diagnostics{}
	computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		// Fixed copy-paste defect: this message previously named ComputeResourceSnapshot.
		diags.AddError("ComputeResourceCDInsert: cannot parsed ID compute from plan", err.Error())
		return diags
	}
	req := compute.CDInsertRequest{ComputeID: computeId}
	req.CDROMID = uint64(plan.CD.Attributes()["cdrom_id"].(types.Int64).ValueInt64())
	tflog.Info(ctx, "ComputeResourceCDInsert: before calling CloudAPI().Compute().CDInsert", map[string]any{"compute_id": computeId, "req": req})
	res, err := c.CloudAPI().Compute().CDInsert(ctx, req)
	tflog.Info(ctx, "ComputeResourceCDInsert: response from CloudAPI().Compute().CDInsert", map[string]any{"compute_id": computeId, "response": res})
	if err != nil {
		diags.AddWarning(
			"ComputeResourceCDInsert: Unable to add cd for Compute",
			err.Error(),
		)
		return diags
	}
	tflog.Info(ctx, "ComputeResourceCDInsert: cd(s) is successfully added", map[string]any{"compute_id": computeId})
	return nil
}
// ComputeResourcePinToStack pins the compute identified by plan.ID to its
// current stack via the cloud API.
//
// A malformed compute ID is reported as an error; a failed PinToStack call is
// reported as a warning. Returns nil on success.
func ComputeResourcePinToStack(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourcePinToStack: Start pin to stack compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourcePinToStack: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	if _, err = c.CloudAPI().Compute().PinToStack(ctx, compute.PinToStackRequest{ComputeID: computeID}); err != nil {
		diags.AddWarning(
			"ComputeResourcePinToStack: cannot pin to stack compute", err.Error(),
		)
		return diags
	}

	tflog.Info(ctx, "ComputeResourcePinToStack: compute is successfully pin to stack", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeResourcePause pauses the compute identified by plan.ID via the cloud
// API.
//
// A malformed compute ID is reported as an error; a failed Pause call is
// reported as a warning. Returns nil on success.
func ComputeResourcePause(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ComputeResourcePause: Start pause compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
	var diags diag.Diagnostics

	// The model stores the compute ID as a string; the API expects uint64.
	computeID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeResourcePause: cannot parsed ID compute from plan", err.Error())
		return diags
	}

	if _, err = c.CloudAPI().Compute().Pause(ctx, compute.PauseRequest{ComputeID: computeID}); err != nil {
		diags.AddWarning(
			"ComputeResourcePause: cannot pause compute", err.Error(),
		)
		return diags
	}

	tflog.Info(ctx, "ComputeResourcePause: compute is successfully pause", map[string]any{"compute_id": computeID})
	return nil
}
// ComputeReadStatus inspects the current status of the compute referenced by
// state and, where the configuration asks for it, brings the resource back to
// a usable condition: a Deleted compute is restored (unless state.Restore is
// explicitly false), its detached extra disks are re-attached, and the compute
// is enabled and started according to the state flags (both default to true
// when null).
//
// Status Modeled and Destroyed are terminal and produce errors.
func ComputeReadStatus(ctx context.Context, state *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
	// Fixed copy-paste defects below: the log key was "rg_id" and the parse
	// error mentioned "resource group ID" although this is a compute resource.
	tflog.Info(ctx, "ComputeReadStatus: Read status compute with ID", map[string]any{"compute_id": state.ID.ValueString()})
	diags := diag.Diagnostics{}
	computeId, err := strconv.ParseUint(state.ID.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("ComputeReadStatus: Cannot parse compute ID from state", err.Error())
		return diags
	}
	recordCompute, diags := ComputeResourceCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "ComputeReadStatus: compute values before status check", map[string]any{
		"compute_id":    computeId,
		"recordCompute": recordCompute})
	// check resource status
	switch recordCompute.Status {
	case status.Modeled:
		diags.AddError(
			"Compute is in status Modeled",
			"please, contact support for more information",
		)
		return diags
	case status.Deleted:
		tflog.Info(ctx, "ComputeReadStatus: compute with status.Deleted is being checked", map[string]any{
			"compute_id": computeId,
			"status":     recordCompute.Status})
		// restore and enable compute in case it is required
		if state.Restore.IsNull() || state.Restore.ValueBool() { // default true or user set-up true
			diags.Append(RestoreCompute(ctx, computeId, c)...)
			if diags.HasError() {
				tflog.Error(ctx, "ComputeReadStatus: cannot restore compute")
				return diags
			}
			tflog.Info(ctx, "ComputeReadStatus: compute restored successfully", map[string]any{"compute_id": computeId})
			// re-attach extra disks that were detached while the compute was deleted
			if !state.ExtraDisks.IsNull() && ComputeResourceCheckDetachDisks(state, recordCompute) {
				diags.Append(ComputeResourceExtraDiskCreate(ctx, state, c)...)
				if diags.HasError() {
					tflog.Error(ctx, "ComputeReadStatus: error when attaching extra disk(s) to a Compute ")
					return diags
				}
			}
			if state.Enabled.IsNull() || state.Enabled.ValueBool() { // default true or user set-up true
				diags.Append(ComputeResourceEnableDisable(ctx, state, c)...)
				if diags.HasError() {
					tflog.Error(ctx, "ComputeReadStatus: Unable to enable compute")
					return diags
				}
				tflog.Info(ctx, "ComputeReadStatus: compute enabled successfully", map[string]any{"compute_id": computeId})
				// starting is only attempted after a successful enable
				if state.Started.IsNull() || state.Started.ValueBool() {
					diags.Append(ComputeResourceStartStop(ctx, state, c)...)
					if diags.HasError() {
						tflog.Error(ctx, "ComputeReadStatus: Unable to start compute")
						return diags
					}
				}
			}
		}
	case status.Destroyed:
		diags.AddError(
			"ComputeReadStatus: compute is in status Destroyed",
			fmt.Sprintf("the resource with id %d cannot be read because it has been destroyed", computeId),
		)
		return diags
	}
	return nil
}
// RestoreCompute restores a previously deleted compute via the cloud API.
// A failed Restore call is reported as an error; nil is returned on success.
func RestoreCompute(ctx context.Context, computeId uint64, c *decort.DecortClient) diag.Diagnostics {
	var diags diag.Diagnostics

	req := compute.RestoreRequest{ComputeID: computeId}
	tflog.Info(ctx, "RestoreCompute: before calling CloudAPI().Compute().Restore", map[string]any{"compute_id": computeId, "req": req})

	res, err := c.CloudAPI().Compute().Restore(ctx, req)
	if err != nil {
		diags.AddError(
			"RestoreCompute: cannot restore compute",
			err.Error(),
		)
		return diags
	}

	tflog.Info(ctx, "RestoreCompute: response from CloudAPI().Compute().Restore", map[string]any{"compute_id": computeId, "response": res})
	return nil
}

View File

@@ -0,0 +1,232 @@
package utilities
import "github.com/hashicorp/terraform-plugin-framework/types"
// differenceSimpleType returns the lists of values added and removed when
// going from oldSet to newSet: added holds elements present only in newSet,
// removed holds elements present only in oldSet. Elements are compared by
// value equality, so set element values must be comparable.
func differenceSimpleType(oldSet, newSet types.Set) (added, removed []any) {
	// Use `any` for the set keys, consistent with the []any results
	// (the original used the legacy interface{} spelling), and pre-size
	// the lookup maps to avoid rehashing.
	oldMap := make(map[any]struct{}, len(oldSet.Elements()))
	newMap := make(map[any]struct{}, len(newSet.Elements()))
	for _, elem := range oldSet.Elements() {
		oldMap[elem] = struct{}{}
	}
	for _, elem := range newSet.Elements() {
		newMap[elem] = struct{}{}
	}
	for elem := range newMap {
		if _, found := oldMap[elem]; !found {
			added = append(added, elem)
		}
	}
	for elem := range oldMap {
		if _, found := newMap[elem]; !found {
			removed = append(removed, elem)
		}
	}
	return
}
// differenceNetwork returns the network objects added and removed when going
// from oldSet to newSet. Two networks match when "net_type" and "net_id" are
// equal and the new element either carries no ip_address (absent or empty
// string) or carries the same ip_address as the old element.
func differenceNetwork(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["net_type"] != newAttrs["net_type"] || oldAttrs["net_id"] != newAttrs["net_id"] {
				continue
			}
			// An unset or empty ip_address on the new side matches any old
			// address; otherwise the addresses must be equal.
			ip, ok := newAttrs["ip_address"]
			if !ok || ip.(types.String).ValueString() == "" || ip == oldAttrs["ip_address"] {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}
// differenceAffinityRules returns the rule objects added and removed when
// going from oldSet to newSet. Two rules match when "topology", "policy",
// "mode" and "key" are all equal and their "value" attributes are either both
// absent or equal.
func differenceAffinityRules(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["topology"] != newAttrs["topology"] || oldAttrs["policy"] != newAttrs["policy"] ||
				oldAttrs["mode"] != newAttrs["mode"] || oldAttrs["key"] != newAttrs["key"] {
				continue
			}
			oldVal, oldOK := oldAttrs["value"]
			newVal, newOK := newAttrs["value"]
			if (!newOK && !oldOK) || oldVal == newVal {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}
// differenceTags returns the tag objects added and removed when going from
// oldSet to newSet. Two tags match when both "key" and "value" are equal.
func differenceTags(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["key"] == newAttrs["key"] && oldAttrs["value"] == newAttrs["value"] {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}
// differencePortForwarding returns the port forwarding objects added and
// removed when going from oldSet to newSet. Two rules match when
// "public_port_start", "public_port_end", "local_port" and "proto" are all
// equal.
func differencePortForwarding(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["public_port_start"] == newAttrs["public_port_start"] &&
				oldAttrs["public_port_end"] == newAttrs["public_port_end"] &&
				oldAttrs["local_port"] == newAttrs["local_port"] &&
				oldAttrs["proto"] == newAttrs["proto"] {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}
// differenceUserAccess returns the user access objects added and removed when
// going from oldSet to newSet. Two entries match when both "username" and
// "access_type" are equal.
func differenceUserAccess(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["username"] == newAttrs["username"] && oldAttrs["access_type"] == newAttrs["access_type"] {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}
// differenceSnapshot returns the snapshot objects added and removed when going
// from oldSet to newSet. Two snapshots match when their "label" attributes are
// equal.
func differenceSnapshot(oldSet, newSet types.Set) (added, removed []any) {
	oldElems := oldSet.Elements()
	newElems := newSet.Elements()
	matched := make([]bool, len(oldElems))

	for _, newElem := range newElems {
		newAttrs := newElem.(types.Object).Attributes()
		present := false
		for i, oldElem := range oldElems {
			oldAttrs := oldElem.(types.Object).Attributes()
			if oldAttrs["label"] == newAttrs["label"] {
				present = true
				matched[i] = true
				break
			}
		}
		if !present {
			added = append(added, newElem)
		}
	}

	for i, ok := range matched {
		if !ok {
			removed = append(removed, oldElems[i])
		}
	}
	return added, removed
}

File diff suppressed because it is too large Load Diff