This commit is contained in:
asteam
2024-07-25 14:33:38 +03:00
commit 6f40af6a5f
946 changed files with 98335 additions and 0 deletions

View File

@@ -0,0 +1,91 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDisk{}
)

// NewDataSourceDisk creates a new disk data source.
func NewDataSourceDisk() datasource.DataSource {
	return &dataSourceDisk{}
}

// dataSourceDisk is the data source implementation.
type dataSourceDisk struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the latest disk data:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response into the model,
// and persists the result as state.
func (d *dataSourceDisk) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error get state")
		return
	}
	diskId := uint64(state.DiskID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDisk: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDisk: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error flatten data source disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDisk", map[string]any{"disk_id": diskId})
}

// Schema defines the schema for the disk data source,
// including the configurable "timeouts" block.
func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDisk(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk").
func (d *dataSourceDisk) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDisk) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDisk")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDisk successfully")
}

View File

@@ -0,0 +1,88 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskList{}
)

// NewDataSourceDiskList creates a new disk-list data source.
func NewDataSourceDiskList() datasource.DataSource {
	return &dataSourceDiskList{}
}

// dataSourceDiskList is the data source implementation.
type dataSourceDiskList struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the current list of disks:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskList: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskList")
}

// Schema defines the schema for the disk-list data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_list").
func (d *dataSourceDiskList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskList successfully")
}

View File

@@ -0,0 +1,88 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListDeleted{}
)

// NewDataSourceDiskListDeleted creates a new deleted-disk-list data source.
func NewDataSourceDiskListDeleted() datasource.DataSource {
	return &dataSourceDiskListDeleted{}
}

// dataSourceDiskListDeleted is the data source implementation.
type dataSourceDiskListDeleted struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the current list of deleted
// disks: it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListDeletedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListDeleted: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListDeletedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListDeleted")
}

// Schema defines the schema for the deleted-disk-list data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListDeleted(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_list_deleted").
func (d *dataSourceDiskListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_list_deleted"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListDeleted")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListDeleted successfully")
}

View File

@@ -0,0 +1,89 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListTypes{}
)

// NewDataSourceDiskListTypes creates a new disk-types-list data source.
func NewDataSourceDiskListTypes() datasource.DataSource {
	return &dataSourceDiskListTypes{}
}

// dataSourceDiskListTypes is the data source implementation.
type dataSourceDiskListTypes struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the available disk types:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskListTypes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListTypesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypes: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypes: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListTypesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListTypes")
}

// Schema defines the schema for the disk-types-list data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListTypes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_list_types").
func (d *dataSourceDiskListTypes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_list_types"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Fixed: the "Get Configure dataSourceDiskListTypes" log line was
	// accidentally duplicated (copy-paste); it is now emitted once,
	// matching every other Configure implementation in this package.
	tflog.Info(ctx, "Get Configure dataSourceDiskListTypes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListTypes successfully")
}

View File

@@ -0,0 +1,88 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListTypesDetailed{}
)

// NewDataSourceDiskListTypesDetailed creates a new detailed-disk-types data source.
func NewDataSourceDiskListTypesDetailed() datasource.DataSource {
	return &dataSourceDiskListTypesDetailed{}
}

// dataSourceDiskListTypesDetailed is the data source implementation.
type dataSourceDiskListTypesDetailed struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with detailed disk-type data:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskListTypesDetailed) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListTypesDetailedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListTypesDetailedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListTypesDetailed")
}

// Schema defines the schema for the detailed-disk-types data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskListTypesDetailed) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListTypesDetailed(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_list_types_detailed").
func (d *dataSourceDiskListTypesDetailed) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_list_types_detailed"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypesDetailed) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListTypesDetailed")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListTypesDetailed successfully")
}

View File

@@ -0,0 +1,88 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListUnattached{}
)

// NewDataSourceDiskListUnattached creates a new unattached-disk-list data source.
func NewDataSourceDiskListUnattached() datasource.DataSource {
	return &dataSourceDiskListUnattached{}
}

// dataSourceDiskListUnattached is the data source implementation.
type dataSourceDiskListUnattached struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the list of unattached disks:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskListUnattached) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListUnattachedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListUnattached: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListUnattached: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListUnattachedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListUnattached")
}

// Schema defines the schema for the unattached-disk-list data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskListUnattached) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListUnattached(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_list_unattached").
func (d *dataSourceDiskListUnattached) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_list_unattached"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListUnattached) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListUnattached")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListUnattached successfully")
}

View File

@@ -0,0 +1,91 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskReplication{}
)

// NewDataSourceDiskReplication creates a new disk-replication data source.
func NewDataSourceDiskReplication() datasource.DataSource {
	return &dataSourceDiskReplication{}
}

// dataSourceDiskReplication is the data source implementation.
type dataSourceDiskReplication struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with disk replication data:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskReplication) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.RecordDiskModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error get state")
		return
	}
	diskId := uint64(state.DiskId.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDiskReplication: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskReplicationDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error flatten data source disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskReplication", map[string]any{"disk_id": diskId})
}

// Schema defines the schema for the disk-replication data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskReplication) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskReplication(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_replication").
func (d *dataSourceDiskReplication) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_replication"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskReplication) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Fixed: the log messages previously said "dataSourceDisk"
	// (copy-paste from the plain disk data source); they now name
	// this data source, consistent with the rest of the package.
	tflog.Info(ctx, "Get Configure dataSourceDiskReplication")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskReplication successfully")
}

View File

@@ -0,0 +1,96 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskSnapshot{}
)

// NewDataSourceDiskSnapshot creates a new disk-snapshot data source.
func NewDataSourceDiskSnapshot() datasource.DataSource {
	return &dataSourceDiskSnapshot{}
}

// dataSourceDiskSnapshot is the data source implementation.
type dataSourceDiskSnapshot struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with snapshot data for a disk,
// identified by disk ID and snapshot label: it loads the config into
// the model, resolves the read timeout (30s default), flattens the
// platform response, and stores the result.
func (d *dataSourceDiskSnapshot) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskSnapshotModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error get state")
		return
	}
	// Common log fields identifying the snapshot being read.
	ctxSnpsht := map[string]any{
		"disk_id": uint64(state.DiskID.ValueInt64()),
		"label":   state.Label.ValueString(),
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshot: got state successfully", ctxSnpsht)

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":     uint64(state.DiskID.ValueInt64()),
		"label":       state.Label.ValueString(),
		"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskSnapshotDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error flatten data source")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskSnapshot", ctxSnpsht)
}

// Schema defines the schema for the disk-snapshot data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskSnapshot) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskSnapshot(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_snapshot").
func (d *dataSourceDiskSnapshot) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_snapshot"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshot) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskSnapshot")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshot successfully")
}

View File

@@ -0,0 +1,91 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskSnapshotList{}
)

// NewDataSourceDiskSnapshotList creates a new disk-snapshot-list data source.
func NewDataSourceDiskSnapshotList() datasource.DataSource {
	return &dataSourceDiskSnapshotList{}
}

// dataSourceDiskSnapshotList is the data source implementation.
type dataSourceDiskSnapshotList struct {
	// client is the DECORT SDK client injected via Configure.
	client *decort.DecortClient
}

// Read refreshes the Terraform state with the snapshot list for a disk:
// it loads the config into the model, resolves the read timeout
// (30s default), flattens the platform response, and stores the result.
func (d *dataSourceDiskSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskSnapshotListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error get state")
		return
	}
	diskId := uint64(state.DiskID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDiskSnapshotList: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshotList: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	// Bound the platform call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskSnapshotListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error flatten data source")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskSnapshotList", map[string]any{"disk_id": diskId})
}

// Schema defines the schema for the disk-snapshot-list data source,
// including the configurable "timeouts" block.
func (d *dataSourceDiskSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskSnapshotList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata returns the full data source type name ("<provider>_disk_snapshot_list").
func (d *dataSourceDiskSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_disk_snapshot_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskSnapshotList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshotList successfully")
}

View File

@@ -0,0 +1,108 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskDataSource flattens data source for disk.
// Returns error diagnostics in case the disk is not found on the platform.
// Conversion diagnostics from the flatten helpers are logged AND returned
// to the caller (previously they were only logged and then discarded).
func DiskDataSource(ctx context.Context, state *models.DataSourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskDataSource")
	diags := diag.Diagnostics{}
	diskId := uint64(state.DiskID.ValueInt64())
	recordDisk, err := utilities.DataSourceDiskCheckPresence(ctx, diskId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DiskDataSource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk})
	id := uuid.New()
	// The ACL structure is stored in state as its raw JSON representation.
	diskAcl, _ := json.Marshal(recordDisk.ACL)
	*state = models.DataSourceDiskModel{
		DiskID:   state.DiskID,
		Timeouts: state.Timeouts,
		// computed fields
		Id:              types.StringValue(id.String()),
		AccountID:       types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:     types.StringValue(recordDisk.AccountName),
		ACL:             types.StringValue(string(diskAcl)),
		Computes:        flattenComputes(ctx, recordDisk.Computes),
		CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
		Description:     types.StringValue(recordDisk.Description),
		DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
		DeviceName:      types.StringValue(recordDisk.DeviceName),
		GID:             types.Int64Value(int64(recordDisk.GID)),
		ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
		Name:            types.StringValue(recordDisk.Name),
		Order:           types.Int64Value(int64(recordDisk.Order)),
		Params:          types.StringValue(recordDisk.Params),
		ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
		PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:            types.StringValue(recordDisk.Pool),
		PurgeTime:       types.Int64Value(int64(recordDisk.PurgeTime)),
		ResID:           types.StringValue(recordDisk.ResID),
		ResName:         types.StringValue(recordDisk.ResName),
		Role:            types.StringValue(recordDisk.Role),
		SepID:           types.Int64Value(int64(recordDisk.SepID)),
		SepType:         types.StringValue(recordDisk.SepType),
		Shareable:       types.BoolValue(recordDisk.Shareable),
		SizeMax:         types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:        types.Float64Value(recordDisk.SizeUsed),
		Snapshots:       flattenSnapshots(ctx, recordDisk.Snapshots),
		Status:          types.StringValue(recordDisk.Status),
		TechStatus:      types.StringValue(recordDisk.TechStatus),
		Type:            types.StringValue(recordDisk.Type),
		VMID:            types.Int64Value(int64(recordDisk.VMID)),
	}
	// Flatten list-valued fields. Use a scratch variable so earlier
	// diagnostics are accumulated instead of being overwritten.
	var d diag.Diagnostics
	state.Images, d = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
	if d.HasError() {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.Images to state.Images", d))
	}
	diags.Append(d...)
	state.PresentTo, d = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
	if d.HasError() {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.PresentTo to state.PresentTo", d))
	}
	diags.Append(d...)
	iotune := models.IOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	obj, d := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
	if d.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskDataSource iotune struct to obj", d))
	}
	diags.Append(d...)
	state.IOTune = obj
	tflog.Info(ctx, "flattens.DiskDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()})
	return diags
}

View File

@@ -0,0 +1,128 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskListDataSource flattens data source for disk list.
// Returns error diagnostics in case the list cannot be fetched from the platform.
// Conversion diagnostics from the flatten helpers are logged AND returned
// to the caller (previously they were only logged and then discarded).
func DiskListDataSource(ctx context.Context, state *models.DataSourceDiskListModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListDataSource")
	diskList, diags := utilities.DataSourceDiskListCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskListDataSource: before flatten")
	id := uuid.New()
	*state = models.DataSourceDiskListModel{
		ByID:        state.ByID,
		Name:        state.Name,
		AccountName: state.AccountName,
		DiskMaxSize: state.DiskMaxSize,
		Status:      state.Status,
		Shared:      state.Shared,
		AccountID:   state.AccountID,
		Type:        state.Type,
		SEPID:       state.SEPID,
		PoolName:    state.PoolName,
		SortBy:      state.SortBy,
		Page:        state.Page,
		Size:        state.Size,
		Timeouts:    state.Timeouts,
		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(diskList.EntryCount)),
	}
	items := make([]models.ItemDiskModel, 0, diskList.EntryCount)
	for _, recordDisk := range diskList.Data {
		// The ACL structure is stored in state as its raw JSON representation.
		diskAcl, _ := json.Marshal(recordDisk.ACL)
		d := models.ItemDiskModel{
			AccountID:       types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:     types.StringValue(recordDisk.AccountName),
			ACL:             types.StringValue(string(diskAcl)),
			Computes:        flattenComputes(ctx, recordDisk.Computes),
			CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:     types.StringValue(recordDisk.Description),
			DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
			DeviceName:      types.StringValue(recordDisk.DeviceName),
			GID:             types.Int64Value(int64(recordDisk.GID)),
			ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
			DiskId:          types.Int64Value(int64(recordDisk.ID)),
			DiskName:        types.StringValue(recordDisk.Name),
			Order:           types.Int64Value(int64(recordDisk.Order)),
			Params:          types.StringValue(recordDisk.Params),
			ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
			PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:            types.StringValue(recordDisk.Pool),
			PurgeTime:       types.Int64Value(int64(recordDisk.PurgeTime)),
			ResID:           types.StringValue(recordDisk.ResID),
			ResName:         types.StringValue(recordDisk.ResName),
			Role:            types.StringValue(recordDisk.Role),
			SepID:           types.Int64Value(int64(recordDisk.SepID)),
			SepType:         types.StringValue(recordDisk.SepType),
			Shareable:       types.BoolValue(recordDisk.Shareable),
			SizeMax:         types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:        types.Float64Value(recordDisk.SizeUsed),
			Snapshots:       flattenSnapshots(ctx, recordDisk.Snapshots),
			Status:          types.StringValue(recordDisk.Status),
			TechStatus:      types.StringValue(recordDisk.TechStatus),
			Type:            types.StringValue(recordDisk.Type),
			VMID:            types.Int64Value(int64(recordDisk.VMID)),
		}
		// Use a scratch variable so per-item diagnostics accumulate instead
		// of overwriting each other.
		var cd diag.Diagnostics
		d.Images, cd = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.Images to d.Images", cd))
		}
		diags.Append(cd...)
		d.PresentTo, cd = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", cd))
		}
		diags.Append(cd...)
		iotune := models.IOTuneModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}
		obj, cd := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListDataSource iotune struct to obj", cd))
		}
		diags.Append(cd...)
		d.IOTune = obj
		items = append(items, d)
	}
	state.Items = items
	tflog.Info(ctx, "flattens.DiskListDataSource: end flatten")
	return diags
}

View File

@@ -0,0 +1,125 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskListDeletedDataSource flattens data source for disk list deleted.
// Returns error diagnostics in case the list cannot be fetched from the platform.
// Conversion diagnostics from the flatten helpers are logged AND returned
// to the caller (previously they were only logged and then discarded).
func DiskListDeletedDataSource(ctx context.Context, state *models.DataSourceDiskListDeletedModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListDeletedDataSource")
	diskList, diags := utilities.DataSourceDiskListDeletedCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskListDeletedDataSource: before flatten")
	id := uuid.New()
	*state = models.DataSourceDiskListDeletedModel{
		ByID:        state.ByID,
		Name:        state.Name,
		AccountName: state.AccountName,
		DiskMaxSize: state.DiskMaxSize,
		Shared:      state.Shared,
		AccountID:   state.AccountID,
		Type:        state.Type,
		SortBy:      state.SortBy,
		Page:        state.Page,
		Size:        state.Size,
		Timeouts:    state.Timeouts,
		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(diskList.EntryCount)),
	}
	items := make([]models.ItemDiskModel, 0, diskList.EntryCount)
	for _, recordDisk := range diskList.Data {
		// The ACL structure is stored in state as its raw JSON representation.
		diskAcl, _ := json.Marshal(recordDisk.ACL)
		d := models.ItemDiskModel{
			AccountID:       types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:     types.StringValue(recordDisk.AccountName),
			ACL:             types.StringValue(string(diskAcl)),
			Computes:        flattenComputes(ctx, recordDisk.Computes),
			CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:     types.StringValue(recordDisk.Description),
			DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
			DeviceName:      types.StringValue(recordDisk.DeviceName),
			GID:             types.Int64Value(int64(recordDisk.GID)),
			ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
			DiskId:          types.Int64Value(int64(recordDisk.ID)),
			DiskName:        types.StringValue(recordDisk.Name),
			Order:           types.Int64Value(int64(recordDisk.Order)),
			Params:          types.StringValue(recordDisk.Params),
			ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
			PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:            types.StringValue(recordDisk.Pool),
			PurgeTime:       types.Int64Value(int64(recordDisk.PurgeTime)),
			ResID:           types.StringValue(recordDisk.ResID),
			ResName:         types.StringValue(recordDisk.ResName),
			Role:            types.StringValue(recordDisk.Role),
			SepID:           types.Int64Value(int64(recordDisk.SepID)),
			SepType:         types.StringValue(recordDisk.SepType),
			Shareable:       types.BoolValue(recordDisk.Shareable),
			SizeMax:         types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:        types.Float64Value(recordDisk.SizeUsed),
			Snapshots:       flattenSnapshots(ctx, recordDisk.Snapshots),
			Status:          types.StringValue(recordDisk.Status),
			TechStatus:      types.StringValue(recordDisk.TechStatus),
			Type:            types.StringValue(recordDisk.Type),
			VMID:            types.Int64Value(int64(recordDisk.VMID)),
		}
		// Use a scratch variable so per-item diagnostics accumulate instead
		// of overwriting each other.
		var cd diag.Diagnostics
		d.Images, cd = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.Images to d.Images", cd))
		}
		diags.Append(cd...)
		d.PresentTo, cd = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", cd))
		}
		diags.Append(cd...)
		iotune := models.IOTuneModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}
		obj, cd := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListDeletedDataSource iotune struct to obj", cd))
		}
		diags.Append(cd...)
		d.IOTune = obj
		items = append(items, d)
	}
	state.Items = items
	tflog.Info(ctx, "flattens.DiskListDeletedDataSource: end flatten")
	return diags
}

View File

@@ -0,0 +1,50 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskListTypesDataSource flattens data source for disk list types.
// Returns error diagnostics in case the list cannot be fetched from the platform.
// Conversion diagnostics from the flatten helpers are logged AND returned
// to the caller (previously they were only logged and then discarded).
func DiskListTypesDataSource(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListTypesDataSource")
	listTypes, diags := utilities.DataSourceDiskListTypesCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskListTypesDataSource: before flatten")
	id := uuid.New()
	*state = models.DataSourceDiskListTypesModel{
		SortBy:   state.SortBy,
		Page:     state.Page,
		Size:     state.Size,
		Timeouts: state.Timeouts,
		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
	}
	// Use a scratch variable so the presence-check diagnostics above are
	// accumulated instead of being overwritten.
	var d diag.Diagnostics
	state.Types, d = types.ListValueFrom(ctx, types.StringType, listTypes.Data)
	if d.HasError() {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDataSource: cannot flatten listTypes.Data to state.Types", d))
	}
	diags.Append(d...)
	tflog.Info(ctx, "flattens.DiskListTypesDataSource: end flatten")
	return diags
}

View File

@@ -0,0 +1,82 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskListTypesDetailedDataSource flattens data source for disk list types detailed.
// Returns error diagnostics in case the list cannot be fetched from the platform.
// The platform returns a loosely typed payload ([]interface{} of maps); every
// type assertion is guarded so an unexpected shape produces a diagnostic
// instead of a panic. Conversion diagnostics are logged AND returned.
func DiskListTypesDetailedDataSource(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListTypesDetailedDataSource")
	listTypes, diags := utilities.DataSourceDiskListTypesDetailedCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: before flatten")
	id := uuid.New()
	*state = models.DataSourceDiskListTypesDetailedModel{
		SortBy:   state.SortBy,
		Page:     state.Page,
		Size:     state.Size,
		Timeouts: state.Timeouts,
		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
	}
	items := make([]models.ItemDiskTypeDetailedModel, 0, len(listTypes.Data))
	for _, typeListDetailed := range listTypes.Data {
		typeMap, ok := typeListDetailed.(map[string]interface{})
		if !ok {
			diags.AddError("flattens.DiskListTypesDetailedDataSource",
				fmt.Sprintf("unexpected list item of type %T, expected map[string]interface{}", typeListDetailed))
			continue
		}
		// JSON numbers decode as float64; a missing or mistyped key yields the zero value.
		sepID, _ := typeMap["sepId"].(float64)
		sepName, _ := typeMap["sepName"].(string)
		t := models.ItemDiskTypeDetailedModel{
			SepID:   types.Int64Value(int64(sepID)),
			SepName: types.StringValue(sepName),
		}
		var pools []models.ItemPoolModel
		poolsTemp, _ := typeMap["pools"].([]interface{})
		for _, pool := range poolsTemp {
			poolsMap, ok := pool.(map[string]interface{})
			if !ok {
				diags.AddError("flattens.DiskListTypesDetailedDataSource",
					fmt.Sprintf("unexpected pool item of type %T, expected map[string]interface{}", pool))
				continue
			}
			name, _ := poolsMap["name"].(string)
			system, _ := poolsMap["system"].(string)
			p := models.ItemPoolModel{
				Name:   types.StringValue(name),
				System: types.StringValue(system),
			}
			poolTypes, _ := poolsMap["types"].([]interface{})
			var d diag.Diagnostics
			p.Types, d = types.ListValueFrom(ctx, types.StringType, flattenTypes(poolTypes))
			if d.HasError() {
				tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDetailedDataSource: cannot flatten poolsMap[\"types\"] to p.Types", d))
			}
			diags.Append(d...)
			pools = append(pools, p)
		}
		t.Pools = pools
		items = append(items, t)
	}
	state.Items = items
	tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: end flatten")
	return diags
}
// flattenTypes converts a loosely typed list of disk type names into a
// []string. The result slice is pre-sized, and non-string elements are
// skipped instead of panicking on a failed type assertion.
func flattenTypes(typesInterface []interface{}) []string {
	typesList := make([]string, 0, len(typesInterface))
	for _, typ := range typesInterface {
		if s, ok := typ.(string); ok {
			typesList = append(typesList, s)
		}
	}
	return typesList
}

View File

@@ -0,0 +1,132 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskListUnattachedDataSource flattens data source for disk list unattached.
// Returns error diagnostics in case the list cannot be fetched from the platform.
// Conversion diagnostics from the flatten helpers are logged AND returned
// to the caller (previously they were only logged and then discarded).
func DiskListUnattachedDataSource(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListUnattachedDataSource")
	diskList, diags := utilities.DataSourceDiskListUnattachedCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: before flatten")
	id := uuid.New()
	*state = models.DataSourceDiskListUnattachedModel{
		ByID:        state.ByID,
		AccountName: state.AccountName,
		DiskMaxSize: state.DiskMaxSize,
		Status:      state.Status,
		AccountID:   state.AccountID,
		SepID:       state.SepID,
		PoolName:    state.PoolName,
		Type:        state.Type,
		SortBy:      state.SortBy,
		Page:        state.Page,
		Size:        state.Size,
		Timeouts:    state.Timeouts,
		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(diskList.EntryCount)),
	}
	items := make([]models.ItemDiskUnattachedModel, 0, diskList.EntryCount)
	for _, recordDisk := range diskList.Data {
		// The ACL structure is stored in state as its raw JSON representation.
		diskAcl, _ := json.Marshal(recordDisk.ACL)
		d := models.ItemDiskUnattachedModel{
			CKey:                types.StringValue(recordDisk.CKey),
			Meta:                flattens.Meta(ctx, recordDisk.Meta),
			AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:         types.StringValue(recordDisk.AccountName),
			ACL:                 types.StringValue(string(diskAcl)),
			BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
			CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:         types.StringValue(recordDisk.Description),
			DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
			DiskPath:            types.StringValue(recordDisk.DiskPath),
			GID:                 types.Int64Value(int64(recordDisk.GID)),
			GUID:                types.Int64Value(int64(recordDisk.GUID)),
			DiskId:              types.Int64Value(int64(recordDisk.ID)),
			ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
			Iqn:                 types.StringValue(recordDisk.IQN),
			Login:               types.StringValue(recordDisk.Login),
			Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
			DiskName:            types.StringValue(recordDisk.Name),
			Order:               types.Int64Value(int64(recordDisk.Order)),
			Params:              types.StringValue(recordDisk.Params),
			ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
			Passwd:              types.StringValue(recordDisk.Password),
			PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:                types.StringValue(recordDisk.Pool),
			PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
			PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
			RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
			ReferenceID:         types.StringValue(recordDisk.ReferenceID),
			ResID:               types.StringValue(recordDisk.ResID),
			ResName:             types.StringValue(recordDisk.ResName),
			Role:                types.StringValue(recordDisk.Role),
			SepID:               types.Int64Value(int64(recordDisk.SEPID)),
			Shareable:           types.BoolValue(recordDisk.Shareable),
			SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
			Snapshots:           flattenSnapshots(ctx, recordDisk.Snapshots),
			Status:              types.StringValue(recordDisk.Status),
			TechStatus:          types.StringValue(recordDisk.TechStatus),
			Type:                types.StringValue(recordDisk.Type),
			VMID:                types.Int64Value(int64(recordDisk.VMID)),
		}
		// Use a scratch variable so per-item diagnostics accumulate instead
		// of overwriting each other.
		var cd diag.Diagnostics
		d.Images, cd = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.Images to d.Images", cd))
		}
		diags.Append(cd...)
		iotune := models.IOTuneModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}
		obj, cd := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
		if cd.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListUnattachedDataSource iotune struct to obj", cd))
		}
		diags.Append(cd...)
		d.IOTune = obj
		items = append(items, d)
	}
	state.Items = items
	tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: end flatten")
	return diags
}

View File

@@ -0,0 +1,130 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskReplicationDataSource flattens data source for disk replication.
// Returns error diagnostics in case the disk is not found on the platform.
// Flatten errors are added to tflog.
func DiskReplicationDataSource(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskReplicationDataSource")
	diags := diag.Diagnostics{}
	recordDisk, status, err := utilities.DataSourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		// Plain string instead of a no-op fmt.Sprintf (vet: printf call with no args).
		diags.AddError("Cannot get info about disk", err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DiskReplicationDataSource: before flatten")
	// The ACL structure is stored in state as its raw JSON representation.
	diskAcl, _ := json.Marshal(recordDisk.ACL)
	*state = models.RecordDiskModel{
		DiskId:          state.DiskId,
		ID:              state.ID,
		Timeouts:        state.Timeouts,
		ACL:             types.StringValue(string(diskAcl)),
		AccountID:       types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:     types.StringValue(recordDisk.AccountName),
		Computes:        flattenDRComputes(ctx, recordDisk.Computes),
		CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
		DeviceName:      types.StringValue(recordDisk.DeviceName),
		Description:     types.StringValue(recordDisk.Description),
		DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
		GID:             types.Int64Value(int64(recordDisk.GID)),
		ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
		Images:          flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
		Name:            types.StringValue(recordDisk.Name),
		Order:           types.Int64Value(int64(recordDisk.Order)),
		Params:          types.StringValue(recordDisk.Params),
		ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
		PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:            types.StringValue(recordDisk.Pool),
		// NOTE(review): PresentTo is flattened as strings here but as int64
		// in the other disk flatteners — confirm which element type the
		// RecordDiskModel schema declares.
		PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
		PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
		ResID:     types.StringValue(recordDisk.ResID),
		ResName:   types.StringValue(recordDisk.ResName),
		Role:      types.StringValue(recordDisk.Role),
		SepType:   types.StringValue(recordDisk.SepType),
		SepID:     types.Int64Value(int64(recordDisk.SepID)),
		Shareable: types.BoolValue(recordDisk.Shareable),
		SizeMax:   types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:  types.Float64Value(recordDisk.SizeUsed),
		Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots),
		Status:    types.StringValue(recordDisk.Status),
		// status is expected to be non-nil on success; guard defensively so a
		// nil pointer yields an empty string instead of a panic.
		StatusReplication: types.StringValue(derefString(status)),
		TechStatus:        types.StringValue(recordDisk.TechStatus),
		Type:              types.StringValue(recordDisk.Type),
		VMID:              types.Int64Value(int64(recordDisk.VMID)),
	}
	iotune := models.DiskReplicationIOTune{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	state.IOTune = iotune
	// Replication is populated here once; the previous empty placeholder in
	// the struct literal above was dead and has been removed.
	itemReplication := models.ItemReplicationModel{
		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
		Role:         types.StringValue(recordDisk.Replication.Role),
		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
	}
	state.Replication = &itemReplication
	tflog.Info(ctx, "flattens.DiskReplicationDataSource: end flatten")
	return nil
}

// derefString returns the pointed-to string, or "" for a nil pointer.
func derefString(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}
// flattenDRComputes converts the compute id -> name map into a
// types.List of ItemComputeModel objects.
// NOTE(review): Go map iteration order is random, so element order in the
// resulting list is not deterministic — confirm callers do not rely on it.
func flattenDRComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenDRComputes")
	tempSlice := make([]types.Object, 0, len(items))
	for id, name := range items {
		temp := models.ItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
		// HasError instead of != nil: a non-nil Diagnostics may hold only warnings.
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes", diags))
	}
	tflog.Info(ctx, "End flattenDRComputes")
	return res
}

View File

@@ -0,0 +1,50 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskSnapshotDataSource flattens data source for disk snapshot.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskSnapshotDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotDataSource")
	diskId := uint64(state.DiskID.ValueInt64())
	// Fetch the snapshot; bail out with the presence-check diagnostics on failure.
	snap, diags := utilities.DataSourceDiskSnapshotCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskSnapshotDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshot": snap})
	// Rebuild state: keep the user-supplied inputs, fill in the computed fields.
	newState := models.DataSourceDiskSnapshotModel{
		DiskID:   state.DiskID,
		Label:    state.Label,
		Timeouts: state.Timeouts,
		// computed fields
		Id:          types.StringValue(uuid.New().String()),
		GUID:        types.StringValue(snap.GUID),
		ResID:       types.StringValue(snap.ResID),
		SnapSetGUID: types.StringValue(snap.SnapSetGUID),
		SnapSetTime: types.Int64Value(int64(snap.SnapSetTime)),
		TimeStamp:   types.Int64Value(int64(snap.TimeStamp)),
	}
	*state = newState
	tflog.Info(ctx, "flattens.DiskSnapshotDataSource: end flatten", map[string]any{
		"disk_id": state.DiskID.ValueInt64(),
		"label":   state.Label.ValueString(),
	})
	return nil
}

View File

@@ -0,0 +1,46 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskSnapshotListDataSource flattens data source for disk snapshot list.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskSnapshotListDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotListModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotListDataSource")

	targetDiskID := uint64(state.DiskID.ValueInt64())

	// Fetch the snapshot list for the disk; convert a plain error into diagnostics.
	snapList, err := utilities.DiskSnapshotListCheckPresence(ctx, targetDiskID, c)
	if err != nil {
		diags := diag.Diagnostics{}
		diags.AddError(fmt.Sprintf("Cannot get info about disk snapshot list with disk ID %v", targetDiskID), err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: before flatten", map[string]any{"disk_id": targetDiskID, "snapshots": snapList})

	// Rebuild the state: keep request fields, compute the rest; Id is a fresh UUID per read.
	*state = models.DataSourceDiskSnapshotListModel{
		DiskID:   state.DiskID,
		Timeouts: state.Timeouts,

		// computed fields
		Id:    types.StringValue(uuid.New().String()),
		Items: flattenSnapshots(ctx, *snapList),
	}

	tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()})
	return nil
}

View File

@@ -0,0 +1,189 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskResource flattens resource for disk.
// Return error in case resource is not found on the platform.
// Flatten errors are added to tflog.
//
// Fix: diag.Diagnostics is a slice type, so comparing it to nil is unreliable
// (an empty non-nil value, or warning-only diagnostics, would be logged as
// errors). Use diags.HasError() instead.
func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskResource")
	diags := diag.Diagnostics{}

	// Disk ID is kept in state as a string; parse it back for the API call.
	diskId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("flattens.DiskResource: Cannot parse disk ID from state", err.Error())
		return diags
	}
	recordDisk, err := utilities.DiskCheckPresence(ctx, diskId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DiskResource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk})

	// ACL is stored in state as its JSON representation.
	diskAcl, _ := json.Marshal(recordDisk.ACL)
	*plan = models.ResourceDiskModel{
		// required fields
		AccountID: types.Int64Value(int64(recordDisk.AccountID)),
		DiskName:  types.StringValue(recordDisk.Name),
		SizeMax:   types.Int64Value(int64(recordDisk.SizeMax)),
		GID:       types.Int64Value(int64(recordDisk.GID)),

		// optional fields — user-supplied values are preserved as-is;
		// unknown ones are backfilled from the platform record below.
		Description: plan.Description,
		Pool:        plan.Pool,
		SEPID:       plan.SEPID,
		Type:        plan.Type,
		Detach:      plan.Detach,
		Permanently: plan.Permanently,
		Reason:      plan.Reason,
		Shareable:   plan.Shareable,
		Timeouts:    plan.Timeouts,

		// computed fields
		LastUpdated:     plan.LastUpdated,
		Id:              types.StringValue(strconv.Itoa(int(recordDisk.ID))),
		DiskId:          types.Int64Value(int64(recordDisk.ID)),
		AccountName:     types.StringValue(recordDisk.AccountName),
		ACL:             types.StringValue(string(diskAcl)),
		Computes:        flattenComputes(ctx, recordDisk.Computes),
		CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
		DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
		DeviceName:      types.StringValue(recordDisk.DeviceName),
		ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
		Order:           types.Int64Value(int64(recordDisk.Order)),
		Params:          types.StringValue(recordDisk.Params),
		ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
		PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
		PurgeTime:       types.Int64Value(int64(recordDisk.PurgeTime)),
		ResID:           types.StringValue(recordDisk.ResID),
		ResName:         types.StringValue(recordDisk.ResName),
		Role:            types.StringValue(recordDisk.Role),
		SepType:         types.StringValue(recordDisk.SepType),
		SizeUsed:        types.Float64Value(recordDisk.SizeUsed),
		Snapshots:       flattenSnapshots(ctx, recordDisk.Snapshots),
		Status:          types.StringValue(recordDisk.Status),
		TechStatus:      types.StringValue(recordDisk.TechStatus),
		VMID:            types.Int64Value(int64(recordDisk.VMID)),
	}

	// NOTE(review): list-conversion failures are only logged here, not returned —
	// preserved as-is; confirm this best-effort behavior is intended.
	plan.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.Images to plan.Images", diags))
	}
	plan.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.PresentTo to plan.PresentTo", diags))
	}

	// Backfill optional fields the user left unknown with platform values.
	if plan.Description.IsUnknown() {
		plan.Description = types.StringValue(recordDisk.Description)
	}
	if plan.Pool.IsUnknown() {
		plan.Pool = types.StringValue(recordDisk.Pool)
	}
	if plan.SEPID.IsUnknown() {
		plan.SEPID = types.Int64Value(int64(recordDisk.SepID))
	}
	if plan.Shareable.IsUnknown() {
		plan.Shareable = types.BoolValue(recordDisk.Shareable)
	}
	if plan.Type.IsUnknown() {
		plan.Type = types.StringValue(recordDisk.Type)
	}

	// IO tuning limits are flattened into a single nested object attribute.
	iotune := models.IOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskResource iotune struct to obj", diags))
	}
	plan.IOTune = obj

	tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{"disk_id": plan.Id.ValueString()})
	tflog.Info(ctx, "End flattens.DiskResource")
	return nil
}
// flattenComputes converts the platform's compute-id -> compute-name map into
// a Terraform list of ItemCompute objects.
//
// Fix: diag.Diagnostics is a slice, so `diags != nil` is unreliable; use
// diags.HasError(). Failures are only logged (best-effort), matching the
// package's existing flatten helpers.
func flattenComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenComputes")
	tempSlice := make([]types.Object, 0, len(items))
	// NOTE: map iteration order is random, so the resulting list order is
	// nondeterministic across runs.
	for id, name := range items {
		temp := models.ItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenComputes", diags))
	}
	tflog.Info(ctx, "End flattenComputes")
	return res
}
// flattenSnapshots converts the SDK's snapshot list into a Terraform list of
// ItemSnapshot objects.
//
// Fix: diag.Diagnostics is a slice, so `diags != nil` is unreliable; use
// diags.HasError(). Failures are only logged (best-effort), matching the
// package's existing flatten helpers.
func flattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
	tflog.Info(ctx, "Start flattenSnapshots")
	tempSlice := make([]types.Object, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.ItemSnapshotModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(item.TimeStamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, temp)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots", diags))
	}
	tflog.Info(ctx, "End flattenSnapshots")
	return res
}

View File

@@ -0,0 +1,146 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskReplicationResource flattens resource for disk replication.
// Return error in case resource is not found on the platform.
// Flatten errors are added to tflog.
//
// Fix: diag.Diagnostics is a slice, so `diags != nil` is unreliable; use
// diags.HasError(). Doc comment corrected to match the function name.
func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskReplicationresource")
	diags := diag.Diagnostics{}

	recordDisk, status, err := utilities.ResourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about disk", err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DiskReplicationresource: before flatten")

	// ACL is stored in state as its JSON representation.
	diskAcl, _ := json.Marshal(recordDisk.ACL)
	*state = models.ResourceRecordDiskReplicationModel{
		// request fields are preserved from the incoming state
		DiskId:        state.DiskId,
		Name:          state.Name,
		SepID:         state.SepID,
		ReplicationId: state.ReplicationId,
		Timeouts:      state.Timeouts,
		PoolName:      state.PoolName,
		Pause:         state.Pause,
		Reverse:       state.Reverse,
		Start:         state.Start,
		Detach:        state.Detach,
		Permanently:   state.Permanently,
		Reason:        state.Reason,

		// computed fields from the platform record
		Id:              types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
		ACL:             types.StringValue(string(diskAcl)),
		AccountID:       types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:     types.StringValue(recordDisk.AccountName),
		Computes:        flattenRComputes(ctx, recordDisk.Computes),
		CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
		DeviceName:      types.StringValue(recordDisk.DeviceName),
		Description:     types.StringValue(recordDisk.Description),
		DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
		GID:             types.Int64Value(int64(recordDisk.GID)),
		ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
		Images:          flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
		Order:           types.Int64Value(int64(recordDisk.Order)),
		Params:          types.StringValue(recordDisk.Params),
		ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
		PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:            types.StringValue(recordDisk.Pool),
		// NOTE(review): PresentTo is flattened with StringType here but with
		// Int64Type in flattens.DiskResource — confirm which element type the
		// schema declares.
		PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
		PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
		ResID:     types.StringValue(recordDisk.ResID),
		ResName:   types.StringValue(recordDisk.ResName),
		Role:      types.StringValue(recordDisk.Role),
		SepType:   types.StringValue(recordDisk.SepType),
		Shareable: types.BoolValue(recordDisk.Shareable),
		SizeMax:   types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:  types.Float64Value(recordDisk.SizeUsed),
		Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots),
		Status:    types.StringValue(recordDisk.Status),
		// NOTE(review): assumes status is non-nil whenever err is nil — confirm
		// against utilities.ResourceDiskReplicationCheckPresence.
		StatusReplication: types.StringValue(*status),
		TechStatus:        types.StringValue(recordDisk.TechStatus),
		Type:              types.StringValue(recordDisk.Type),
		VMID:              types.Int64Value(int64(recordDisk.VMID)),
	}

	// IO tuning limits are flattened into a single nested object attribute.
	iotune := models.ResourceDiskReplicationIOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ResourceDiskReplicationIOTune, iotune)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource iotune struct to obj", diags))
	}
	state.IOTune = obj

	// Replication details are flattened into a nested object attribute.
	itemReplication := models.ResourceItemReplicationModel{
		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
		Role:         types.StringValue(recordDisk.Replication.Role),
		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
	}
	obj, diags = types.ObjectValueFrom(ctx, models.ResourceItemReplication, itemReplication)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource Replication struct to obj", diags))
	}
	state.Replication = obj

	tflog.Info(ctx, "flattens.ReplicationDiskresource: end flatten")
	return nil
}
// flattenRComputes converts the platform's compute-id -> compute-name map into
// a Terraform list of ItemCompute objects for the replication resource.
//
// NOTE(review): this duplicates flattenComputes in the same package — consider
// consolidating.
//
// Fix: diag.Diagnostics is a slice, so `diags != nil` is unreliable; use
// diags.HasError(). Failures are only logged (best-effort).
func flattenRComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenRComputes")
	tempSlice := make([]types.Object, 0, len(items))
	// NOTE: map iteration order is random, so the resulting list order is
	// nondeterministic across runs.
	for id, name := range items {
		temp := models.ItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
		if diags.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenRComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
	if diags.HasError() {
		tflog.Error(ctx, fmt.Sprint("Error flattenRComputes", diags))
	}
	tflog.Info(ctx, "End flattenRComputes")
	return res
}

View File

@@ -0,0 +1,65 @@
package flattens
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// DiskSnapshotResource flattens resource for disk snapshot.
// Return error in case resource is not found on the platform.
// Flatten errors are added to tflog.
//
// Fix: the final log message wrongly named flattens.DiskResource; it now
// names this function.
func DiskSnapshotResource(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotResource", map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString()})

	recordSnapshot, diags := utilities.DiskSnapshotCheckPresence(ctx, plan, c)
	if diags.HasError() {
		return diags
	}
	tflog.Info(ctx, "flattens.DiskSnapshotResource: before flatten", map[string]any{
		"disk_id":    plan.DiskID.ValueInt64(),
		"label":      plan.Label.ValueString(),
		"recordDisk": recordSnapshot})

	// The resource ID has the composite form "<disk_id>#<label>"; build it
	// if the current ID is not already in that form (e.g. on create).
	id := plan.Id
	if !strings.Contains(id.ValueString(), "#") {
		id = types.StringValue(fmt.Sprintf("%d#%s", plan.DiskID.ValueInt64(), plan.Label.ValueString()))
	}

	*plan = models.ResourceDiskSnapshotModel{
		// required fields
		DiskID: plan.DiskID,
		Label:  types.StringValue(recordSnapshot.Label),

		// optional fields
		Rollback:  plan.Rollback,
		TimeStamp: plan.TimeStamp,
		Timeouts:  plan.Timeouts,

		// computed fields
		Id:          id,
		GUID:        types.StringValue(recordSnapshot.GUID),
		ResID:       types.StringValue(recordSnapshot.ResID),
		SnapSetGUID: types.StringValue(recordSnapshot.SnapSetGUID),
		SnapSetTime: types.Int64Value(int64(recordSnapshot.SnapSetTime)),
	}

	// Backfill the timestamp from the platform when the user left it unknown.
	if plan.TimeStamp.IsUnknown() {
		plan.TimeStamp = types.Int64Value(int64(recordSnapshot.TimeStamp))
	}

	tflog.Info(ctx, "flattens.DiskSnapshotResource: after flatten", map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString()})
	return nil
}

View File

@@ -0,0 +1,99 @@
package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/ic"
)
// resourceDiskCreateInputChecks checks if user provided account_id and gid exist on the platform during disk creation.
// Both checks are performed even if the first one fails, so the user sees all
// problems at once; results are accumulated into the returned diagnostics.
func resourceDiskCreateInputChecks(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// Verify the target account exists.
	accountID := uint64(plan.AccountID.ValueInt64())
	tflog.Info(ctx, "resourceDiskCreateInputChecks: exist account check", map[string]any{"account_id": accountID})
	if err := ic.ExistAccount(ctx, accountID, c); err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountID), err.Error())
	}

	// Verify the target grid (gid) exists.
	gridID := uint64(plan.GID.ValueInt64())
	tflog.Info(ctx, "resourceDiskCreateInputChecks: exist gid check", map[string]any{"gid": gridID})
	if err := ic.ExistGID(ctx, gridID, c); err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about GID %v", gridID), err.Error())
	}

	return diags
}
// resourceDiskReplicationInputChecks checks if user provided disk_id exist on the platform during disk replication.
//
// Fix: the log message wrongly named resourceDiskCreateInputChecks; it now
// names this function.
func resourceDiskReplicationInputChecks(ctx context.Context, plan *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	diskId := uint64(plan.DiskId.ValueInt64())
	tflog.Info(ctx, "resourceDiskReplicationInputChecks: exist disk check", map[string]any{"disk_id": diskId})
	if err := ic.ExistDiskID(ctx, diskId, c); err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
	}

	return diags
}
// resourceDiskUpdateInputChecks checks if user provided:
// account_id and gid exist on the platform during disk creation,
// description, pool, sep_id, type are not attempted to be changed.
// All violations are accumulated so the user sees every problem in one run.
func resourceDiskUpdateInputChecks(ctx context.Context, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// account_id and gid must exist on the platform (same checks as creation).
	diags.Append(resourceDiskCreateInputChecks(ctx, plan, c)...)

	// The following attributes are immutable: a known planned value that
	// differs from state is rejected.
	if !plan.Description.IsUnknown() && !plan.Description.Equal(state.Description) {
		diags.AddError(
			"resourceDiskUpdateInputChecks: description change is not allowed",
			fmt.Sprintf("cannot change description from %s to %s for disk id %s",
				state.Description.ValueString(),
				plan.Description.ValueString(),
				plan.Id.ValueString()))
	}

	if !plan.Pool.IsUnknown() && !plan.Pool.Equal(state.Pool) {
		diags.AddError(
			"resourceDiskUpdateInputChecks: pool change is not allowed",
			fmt.Sprintf("cannot change pool from %s to %s for disk id %s",
				state.Pool.ValueString(),
				plan.Pool.ValueString(),
				plan.Id.ValueString()))
	}

	if !plan.SEPID.IsUnknown() && !plan.SEPID.Equal(state.SEPID) {
		diags.AddError(
			"resourceDiskUpdateInputChecks: sep_id change is not allowed",
			fmt.Sprintf("cannot change sep_id from %d to %d for disk id %s",
				state.SEPID.ValueInt64(),
				plan.SEPID.ValueInt64(),
				plan.Id.ValueString()))
	}

	if !plan.Type.IsUnknown() && !plan.Type.Equal(state.Type) {
		diags.AddError(
			"resourceDiskUpdateInputChecks: type change is not allowed",
			fmt.Sprintf("cannot change type from %s to %s for disk id %s",
				state.Type.ValueString(),
				plan.Type.ValueString(),
				plan.Id.ValueString()))
	}

	return diags
}

View File

@@ -0,0 +1,49 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskModel maps the schema of the single-disk data source:
// disk_id (plus timeouts) is supplied by the user, all remaining fields are
// computed from the platform response during flattening.
type DataSourceDiskModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id              types.String  `tfsdk:"id"`
	ACL             types.String  `tfsdk:"acl"` // ACL record serialized to JSON
	AccountID       types.Int64   `tfsdk:"account_id"`
	AccountName     types.String  `tfsdk:"account_name"`
	Computes        types.List    `tfsdk:"computes"`
	CreatedTime     types.Int64   `tfsdk:"created_time"`
	DeletedTime     types.Int64   `tfsdk:"deleted_time"`
	DeviceName      types.String  `tfsdk:"devicename"`
	Description     types.String  `tfsdk:"desc"`
	DestructionTime types.Int64   `tfsdk:"destruction_time"`
	GID             types.Int64   `tfsdk:"gid"`
	ImageID         types.Int64   `tfsdk:"image_id"`
	Images          types.List    `tfsdk:"images"`
	IOTune          types.Object  `tfsdk:"iotune"` // nested IO tuning limits object
	Name            types.String  `tfsdk:"disk_name"`
	Order           types.Int64   `tfsdk:"order"`
	Params          types.String  `tfsdk:"params"`
	ParentID        types.Int64   `tfsdk:"parent_id"`
	PCISlot         types.Int64   `tfsdk:"pci_slot"`
	Pool            types.String  `tfsdk:"pool"`
	PresentTo       types.List    `tfsdk:"present_to"`
	PurgeTime       types.Int64   `tfsdk:"purge_time"`
	ResID           types.String  `tfsdk:"res_id"`
	ResName         types.String  `tfsdk:"res_name"`
	Role            types.String  `tfsdk:"role"`
	SepType         types.String  `tfsdk:"sep_type"`
	SepID           types.Int64   `tfsdk:"sep_id"`
	Shareable       types.Bool    `tfsdk:"shareable"`
	SizeMax         types.Int64   `tfsdk:"size_max"`
	SizeUsed        types.Float64 `tfsdk:"size_used"`
	Snapshots       types.List    `tfsdk:"snapshots"`
	Status          types.String  `tfsdk:"status"`
	TechStatus      types.String  `tfsdk:"tech_status"`
	Type            types.String  `tfsdk:"type"`
	VMID            types.Int64   `tfsdk:"vmid"`
}

View File

@@ -0,0 +1,69 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskListModel maps the schema of the disk-list data source:
// optional filter/sort/pagination fields are supplied by the user; Id, Items
// and EntryCount are computed from the platform response.
type DataSourceDiskListModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Status      types.String   `tfsdk:"status"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SEPID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String    `tfsdk:"id"`
	Items      []ItemDiskModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

// ItemDiskModel is one element of the disk-list data source response.
type ItemDiskModel struct {
	AccountID       types.Int64   `tfsdk:"account_id"`
	AccountName     types.String  `tfsdk:"account_name"`
	ACL             types.String  `tfsdk:"acl"` // ACL record serialized to JSON
	Computes        types.List    `tfsdk:"computes"`
	CreatedTime     types.Int64   `tfsdk:"created_time"`
	DeletedTime     types.Int64   `tfsdk:"deleted_time"`
	Description     types.String  `tfsdk:"desc"`
	DestructionTime types.Int64   `tfsdk:"destruction_time"`
	DeviceName      types.String  `tfsdk:"devicename"`
	GID             types.Int64   `tfsdk:"gid"`
	ImageID         types.Int64   `tfsdk:"image_id"`
	Images          types.List    `tfsdk:"images"`
	IOTune          types.Object  `tfsdk:"iotune"` // nested IO tuning limits object
	MachineID       types.Int64   `tfsdk:"machine_id"`
	MachineName     types.String  `tfsdk:"machine_name"`
	DiskId          types.Int64   `tfsdk:"disk_id"`
	DiskName        types.String  `tfsdk:"disk_name"`
	Order           types.Int64   `tfsdk:"order"`
	Params          types.String  `tfsdk:"params"`
	ParentID        types.Int64   `tfsdk:"parent_id"`
	PCISlot         types.Int64   `tfsdk:"pci_slot"`
	Pool            types.String  `tfsdk:"pool"`
	PresentTo       types.List    `tfsdk:"present_to"`
	PurgeTime       types.Int64   `tfsdk:"purge_time"`
	ResID           types.String  `tfsdk:"res_id"`
	ResName         types.String  `tfsdk:"res_name"`
	Role            types.String  `tfsdk:"role"`
	SepID           types.Int64   `tfsdk:"sep_id"`
	SepType         types.String  `tfsdk:"sep_type"`
	Shareable       types.Bool    `tfsdk:"shareable"`
	SizeMax         types.Int64   `tfsdk:"size_max"`
	SizeUsed        types.Float64 `tfsdk:"size_used"`
	Snapshots       types.List    `tfsdk:"snapshots"`
	Status          types.String  `tfsdk:"status"`
	TechStatus      types.String  `tfsdk:"tech_status"`
	Type            types.String  `tfsdk:"type"`
	VMID            types.Int64   `tfsdk:"vmid"`
}

View File

@@ -0,0 +1,26 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskListDeletedModel maps the schema of the deleted-disk-list data
// source: optional filter/sort/pagination fields are supplied by the user;
// Id, Items and EntryCount are computed. Items reuses ItemDiskModel.
type DataSourceDiskListDeletedModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String    `tfsdk:"id"`
	Items      []ItemDiskModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

View File

@@ -0,0 +1,19 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskListTypesModel maps the schema of the disk-types data source:
// optional sort/pagination fields are supplied by the user; Id, Types and
// EntryCount are computed from the platform response.
type DataSourceDiskListTypesModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String `tfsdk:"id"`
	Types      types.List   `tfsdk:"types"`
	EntryCount types.Int64  `tfsdk:"entry_count"`
}

View File

@@ -0,0 +1,31 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskListTypesDetailedModel maps the schema of the detailed
// disk-types data source: optional sort/pagination fields are supplied by the
// user; Id, Items and EntryCount are computed from the platform response.
type DataSourceDiskListTypesDetailedModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String                 `tfsdk:"id"`
	Items      []ItemDiskTypeDetailedModel  `tfsdk:"items"`
	EntryCount types.Int64                  `tfsdk:"entry_count"`
}

// ItemDiskTypeDetailedModel is one SEP entry of the detailed disk-types
// response, carrying the SEP's pools.
type ItemDiskTypeDetailedModel struct {
	Pools   []ItemPoolModel `tfsdk:"pools"`
	SepID   types.Int64     `tfsdk:"sep_id"`
	SepName types.String    `tfsdk:"sep_name"`
}

// ItemPoolModel is one pool entry within a SEP of the detailed disk-types
// response.
type ItemPoolModel struct {
	Name   types.String `tfsdk:"name"`
	System types.String `tfsdk:"system"`
	Types  types.List   `tfsdk:"types"`
}

View File

@@ -0,0 +1,73 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskListUnattachedModel maps the schema of the unattached-disk-list
// data source: optional filter/sort/pagination fields are supplied by the user;
// Id, Items and EntryCount are computed from the platform response.
type DataSourceDiskListUnattachedModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Status      types.String   `tfsdk:"status"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	SepID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String              `tfsdk:"id"`
	Items      []ItemDiskUnattachedModel `tfsdk:"items"`
	EntryCount types.Int64               `tfsdk:"entry_count"`
}

// ItemDiskUnattachedModel is one element of the unattached-disk-list response.
type ItemDiskUnattachedModel struct {
	CKey                types.String  `tfsdk:"ckey"`
	Meta                types.List    `tfsdk:"meta"`
	AccountID           types.Int64   `tfsdk:"account_id"`
	AccountName         types.String  `tfsdk:"account_name"`
	ACL                 types.String  `tfsdk:"acl"` // ACL record serialized to JSON
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	Description         types.String  `tfsdk:"desc"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	GID                 types.Int64   `tfsdk:"gid"`
	GUID                types.Int64   `tfsdk:"guid"`
	DiskId              types.Int64   `tfsdk:"disk_id"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	Images              types.List    `tfsdk:"images"`
	IOTune              types.Object  `tfsdk:"iotune"` // nested IO tuning limits object
	Iqn                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	DiskName            types.String  `tfsdk:"disk_name"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	Pool                types.String  `tfsdk:"pool"`
	PurgeAttempts       types.Int64   `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	ResID               types.String  `tfsdk:"res_id"`
	ResName             types.String  `tfsdk:"res_name"`
	Role                types.String  `tfsdk:"role"`
	SepID               types.Int64   `tfsdk:"sep_id"`
	Shareable           types.Bool    `tfsdk:"shareable"`
	SizeMax             types.Int64   `tfsdk:"size_max"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	Type                types.String  `tfsdk:"type"`
	VMID                types.Int64   `tfsdk:"vmid"`
}

View File

@@ -0,0 +1,86 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type RecordDiskModel struct {
// request fields - required
DiskId types.Int64 `tfsdk:"disk_id"`
ID types.Int64 `tfsdk:"replica_disk_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
ACL types.String `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
Computes types.List `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DeviceName types.String `tfsdk:"devicename"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
GID types.Int64 `tfsdk:"gid"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune DiskReplicationIOTune `tfsdk:"iotune"`
Name types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
Replication *ItemReplicationModel `tfsdk:"replication"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SepType types.String `tfsdk:"sep_type"`
SepID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots types.List `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
StatusReplication types.String `tfsdk:"status_replication"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
type DiskReplicationIOTune struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
type ItemReplicationModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
PoolID types.String `tfsdk:"pool_id"`
Role types.String `tfsdk:"role"`
SelfVolumeID types.String `tfsdk:"self_volume_id"`
StorageID types.String `tfsdk:"storage_id"`
VolumeID types.String `tfsdk:"volume_id"`
}
// DiskReplicationItemSnapshot is the tfsdk model for one entry of the
// "snapshots" list attribute of the disk replication model.
type DiskReplicationItemSnapshot struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,21 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskSnapshotModel is the tfsdk config/state model for the disk
// snapshot data source. The snapshot is looked up by disk ID plus label
// (request fields); the remaining fields are populated from the API response.
type DataSourceDiskSnapshotModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Label    types.String   `tfsdk:"label"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id          types.String `tfsdk:"id"`
	GUID        types.String `tfsdk:"guid"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,16 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDiskSnapshotListModel is the tfsdk config/state model for the
// disk snapshot list data source: all snapshots of the disk identified by
// DiskID are returned in Items.
type DataSourceDiskSnapshotListModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id    types.String `tfsdk:"id"`
	Items types.List   `tfsdk:"items"`
}

View File

@@ -0,0 +1,116 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ResourceDiskModel is the tfsdk plan/state model for the disk resource.
// Request fields mirror the create/update API parameters; response fields
// are computed from the platform after create/read.
type ResourceDiskModel struct {
	// request fields - required
	AccountID types.Int64  `tfsdk:"account_id"`
	DiskName  types.String `tfsdk:"disk_name"`
	SizeMax   types.Int64  `tfsdk:"size_max"`
	GID       types.Int64  `tfsdk:"gid"`
	// request fields - optional
	Description types.String   `tfsdk:"desc"`
	Pool        types.String   `tfsdk:"pool"`
	SEPID       types.Int64    `tfsdk:"sep_id"`
	Type        types.String   `tfsdk:"type"`
	Detach      types.Bool     `tfsdk:"detach"`      // delete-time option
	Permanently types.Bool     `tfsdk:"permanently"` // delete-time option
	Reason      types.String   `tfsdk:"reason"`
	Shareable   types.Bool     `tfsdk:"shareable"`
	IOTune      types.Object   `tfsdk:"iotune"` // object of ItemIOTune attr types
	Timeouts    timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id              types.String  `tfsdk:"id"`
	LastUpdated     types.String  `tfsdk:"last_updated"`
	ACL             types.String  `tfsdk:"acl"`
	AccountName     types.String  `tfsdk:"account_name"`
	Computes        types.List    `tfsdk:"computes"` // list of ItemCompute objects
	CreatedTime     types.Int64   `tfsdk:"created_time"`
	DeletedTime     types.Int64   `tfsdk:"deleted_time"`
	DeviceName      types.String  `tfsdk:"devicename"`
	DestructionTime types.Int64   `tfsdk:"destruction_time"`
	DiskId          types.Int64   `tfsdk:"disk_id"`
	ImageID         types.Int64   `tfsdk:"image_id"`
	Images          types.List    `tfsdk:"images"`
	Order           types.Int64   `tfsdk:"order"`
	Params          types.String  `tfsdk:"params"`
	ParentID        types.Int64   `tfsdk:"parent_id"`
	PCISlot         types.Int64   `tfsdk:"pci_slot"`
	PresentTo       types.List    `tfsdk:"present_to"`
	PurgeTime       types.Int64   `tfsdk:"purge_time"`
	ResID           types.String  `tfsdk:"res_id"`
	ResName         types.String  `tfsdk:"res_name"`
	Role            types.String  `tfsdk:"role"`
	SepType         types.String  `tfsdk:"sep_type"`
	SizeUsed        types.Float64 `tfsdk:"size_used"`
	Snapshots       types.List    `tfsdk:"snapshots"` // list of ItemSnapshot objects
	Status          types.String  `tfsdk:"status"`
	TechStatus      types.String  `tfsdk:"tech_status"`
	VMID            types.Int64   `tfsdk:"vmid"`
}
// IOTuneModel is the tfsdk model for the disk resource's "iotune" attribute:
// I/O limits (read/write/total bytes per second and IOPS), each with a burst
// "max" variant. Attribute types are declared in ItemIOTune.
type IOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec     types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}
// ItemSnapshotModel is the tfsdk model for one entry of the disk resource's
// "snapshots" list attribute. Attribute types are declared in ItemSnapshot.
type ItemSnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}
// ItemComputeModel is the tfsdk model for one entry of the disk resource's
// "computes" list attribute: a compute the disk is attached to.
// Attribute types are declared in ItemCompute.
type ItemComputeModel struct {
	ComputeId   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}
// ItemCompute maps the tfsdk attribute names of ItemComputeModel to their
// attr.Type; used to construct types.Object/types.List values for "computes".
// Keys must match the tfsdk tags on ItemComputeModel exactly.
var ItemCompute = map[string]attr.Type{
	"compute_id":   types.StringType,
	"compute_name": types.StringType,
}
// ItemSnapshot maps the tfsdk attribute names of ItemSnapshotModel to their
// attr.Type; used to construct types.Object/types.List values for "snapshots".
// Keys must match the tfsdk tags on ItemSnapshotModel exactly.
var ItemSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}
// ItemIOTune maps the tfsdk attribute names of IOTuneModel to their
// attr.Type; used to construct the types.Object value for "iotune".
// Keys must match the tfsdk tags on IOTuneModel exactly.
var ItemIOTune = map[string]attr.Type{
	"read_bytes_sec":      types.Int64Type,
	"read_bytes_sec_max":  types.Int64Type,
	"read_iops_sec":       types.Int64Type,
	"read_iops_sec_max":   types.Int64Type,
	"size_iops_sec":       types.Int64Type,
	"total_bytes_sec":     types.Int64Type,
	"total_bytes_sec_max": types.Int64Type,
	"total_iops_sec":      types.Int64Type,
	"total_iops_sec_max":  types.Int64Type,
	"write_bytes_sec":     types.Int64Type,
	"write_bytes_sec_max": types.Int64Type,
	"write_iops_sec":      types.Int64Type,
	"write_iops_sec_max":  types.Int64Type,
}

View File

@@ -0,0 +1,110 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ResourceRecordDiskReplicationModel is the tfsdk plan/state model for the
// disk replication resource. Request fields identify the source disk and the
// target SEP/pool plus replication controls (pause/reverse/start); response
// fields are computed from the platform, including the replica's ID
// (replica_disk_id) and the nested "replication" object.
type ResourceRecordDiskReplicationModel struct {
	// request fields
	DiskId      types.Int64    `tfsdk:"disk_id"`
	Name        types.String   `tfsdk:"disk_name"`
	SepID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	Pause       types.Bool     `tfsdk:"pause"`
	Reverse     types.Bool     `tfsdk:"reverse"`
	Start       types.Bool     `tfsdk:"start"`
	Detach      types.Bool     `tfsdk:"detach"`      // delete-time option
	Permanently types.Bool     `tfsdk:"permanently"` // delete-time option
	Reason      types.String   `tfsdk:"reason"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id                types.String  `tfsdk:"id"`
	ACL               types.String  `tfsdk:"acl"`
	AccountID         types.Int64   `tfsdk:"account_id"`
	AccountName       types.String  `tfsdk:"account_name"`
	Computes          types.List    `tfsdk:"computes"`
	CreatedTime       types.Int64   `tfsdk:"created_time"`
	DeletedTime       types.Int64   `tfsdk:"deleted_time"`
	DeviceName        types.String  `tfsdk:"devicename"`
	Description       types.String  `tfsdk:"desc"`
	DestructionTime   types.Int64   `tfsdk:"destruction_time"`
	GID               types.Int64   `tfsdk:"gid"`
	ImageID           types.Int64   `tfsdk:"image_id"`
	ReplicationId     types.Int64   `tfsdk:"replica_disk_id"`
	Images            types.List    `tfsdk:"images"`
	IOTune            types.Object  `tfsdk:"iotune"` // object of ResourceDiskReplicationIOTune attr types
	Order             types.Int64   `tfsdk:"order"`
	Params            types.String  `tfsdk:"params"`
	ParentID          types.Int64   `tfsdk:"parent_id"`
	PCISlot           types.Int64   `tfsdk:"pci_slot"`
	Pool              types.String  `tfsdk:"pool"`
	PresentTo         types.List    `tfsdk:"present_to"`
	PurgeTime         types.Int64   `tfsdk:"purge_time"`
	Replication       types.Object  `tfsdk:"replication"` // object of ResourceItemReplication attr types
	ResID             types.String  `tfsdk:"res_id"`
	ResName           types.String  `tfsdk:"res_name"`
	Role              types.String  `tfsdk:"role"`
	SepType           types.String  `tfsdk:"sep_type"`
	Shareable         types.Bool    `tfsdk:"shareable"`
	SizeMax           types.Int64   `tfsdk:"size_max"`
	SizeUsed          types.Float64 `tfsdk:"size_used"`
	Snapshots         types.List    `tfsdk:"snapshots"`
	Status            types.String  `tfsdk:"status"`
	StatusReplication types.String  `tfsdk:"status_replication"`
	TechStatus        types.String  `tfsdk:"tech_status"`
	Type              types.String  `tfsdk:"type"`
	VMID              types.Int64   `tfsdk:"vmid"`
}
// ResourceDiskReplicationIOTuneModel is the tfsdk model for the replication
// resource's "iotune" attribute: per-disk I/O limits with burst "max"
// variants. Attribute types are declared in ResourceDiskReplicationIOTune.
type ResourceDiskReplicationIOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}
// ResourceDiskReplicationIOTune maps the tfsdk attribute names of
// ResourceDiskReplicationIOTuneModel to their attr.Type; used to construct
// the types.Object value for "iotune". Keys must match the tfsdk tags
// on ResourceDiskReplicationIOTuneModel exactly.
var ResourceDiskReplicationIOTune = map[string]attr.Type{
	"read_bytes_sec":      types.Int64Type,
	"read_bytes_sec_max":  types.Int64Type,
	"read_iops_sec":       types.Int64Type,
	"read_iops_sec_max":   types.Int64Type,
	"size_iops_sec":       types.Int64Type,
	"total_bytes_sec":     types.Int64Type,
	"total_bytes_sec_max": types.Int64Type,
	"total_iops_sec":      types.Int64Type,
	"total_iops_sec_max":  types.Int64Type,
	"write_bytes_sec":     types.Int64Type,
	"write_bytes_sec_max": types.Int64Type,
	"write_iops_sec":      types.Int64Type,
	"write_iops_sec_max":  types.Int64Type,
}
// ResourceItemReplicationModel is the tfsdk model for the replication
// resource's nested "replication" attribute: identifiers describing the
// replica's placement (disk, pool, storage, volume) and its role.
// Attribute types are declared in ResourceItemReplication.
type ResourceItemReplicationModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}
// ResourceItemReplication maps the tfsdk attribute names of
// ResourceItemReplicationModel to their attr.Type; used to construct the
// types.Object value for the "replication" attribute. Keys must match the
// tfsdk tags on ResourceItemReplicationModel exactly.
var ResourceItemReplication = map[string]attr.Type{
	"disk_id":        types.Int64Type,
	"pool_id":        types.StringType,
	"role":           types.StringType,
	"self_volume_id": types.StringType,
	// Fixed: key was "storage_id:" (stray trailing colon), which did not
	// match the `tfsdk:"storage_id"` tag on ResourceItemReplicationModel
	// and would make object type construction/conversion fail.
	"storage_id": types.StringType,
	"volume_id":  types.StringType,
}

View File

@@ -0,0 +1,25 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// ResourceDiskSnapshotModel is the tfsdk plan/state model for the disk
// snapshot resource. The snapshot is addressed by disk ID plus label;
// Rollback with TimeStamp optionally restores the disk to the snapshot.
type ResourceDiskSnapshotModel struct {
	// request fields - required
	DiskID types.Int64  `tfsdk:"disk_id"`
	Label  types.String `tfsdk:"label"`
	// request fields - optional
	Rollback  types.Bool     `tfsdk:"rollback"`
	TimeStamp types.Int64    `tfsdk:"timestamp"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`
	// response fields
	Id          types.String `tfsdk:"id"`
	LastUpdated types.String `tfsdk:"last_updated"`
	GUID        types.String `tfsdk:"guid"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
}

View File

@@ -0,0 +1,354 @@
package disks
import (
"context"
"reflect"
"strconv"
"time"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time assertions: a build error here means resourceDisk no longer
// implements the plugin-framework resource contracts.
var (
	_ resource.Resource                = &resourceDisk{}
	_ resource.ResourceWithImportState = &resourceDisk{}
)
// NewResourceDisk returns a fresh, unconfigured disk resource
// implementation for registration with the provider; the API client is
// injected later via Configure.
func NewResourceDisk() resource.Resource {
	return new(resourceDisk)
}
// resourceDisk is the disk resource implementation.
type resourceDisk struct {
	// client is the decort API client, injected by Configure.
	client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
// Flow: read plan -> apply create timeout -> validate inputs against the
// platform -> create disk -> best-effort post-creation settings (io limits,
// shareable) -> flatten platform state back into the plan and save it.
func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create resource group
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error receiving the plan")
		return
	}
	contextCreateMap := map[string]any{
		"account_id": plan.AccountID.ValueInt64(),
		"disk_name":  plan.DiskName.ValueString(),
		"size_max":   plan.SizeMax.ValueInt64(),
		"gid":        plan.GID.ValueInt64(),
	}
	tflog.Info(ctx, "Create resourceDisk: got plan successfully", contextCreateMap)
	tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap)
	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{
		"account_id":    plan.AccountID.ValueInt64(),
		"disk_name":     plan.DiskName.ValueString(),
		"size_max":      plan.SizeMax.ValueInt64(),
		"gid":           plan.GID.ValueInt64(),
		"createTimeout": createTimeout})
	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()
	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap)
	resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap)
	// Make create request and get response
	createReq := utilities.CreateRequestResourceDisk(ctx, &plan)
	tflog.Info(ctx, "Create resourceDisk: before call CloudAPI().Disks().Create", map[string]any{"req": createReq})
	diskId, err := r.client.CloudAPI().Disks().Create(ctx, createReq)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDisk: unable to Create Disk",
			err.Error(),
		)
		return
	}
	// The returned disk ID becomes the Terraform resource ID.
	plan.Id = types.StringValue(strconv.Itoa(int(diskId)))
	tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()})
	// additional settings after disk creation: in case of failures, warnings are added to resp.Diagnostics,
	// because additional settings failure is not critical. If errors were added instead of warnings, terraform
	// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
	// sets io limits to disk if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...)
	}
	// share disk if needed, warnings added to resp.Diagnostics in case of failure.
	if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true
		resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...)
	}
	tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId})
	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Read refreshes the Terraform state with the latest data.
// Flow: read state -> apply read timeout -> verify disk status on the
// platform -> flatten the platform response over the state and save it.
func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":     state.Id.ValueString(),
		"readTimeout": readTimeout})
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// read status
	resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error reading disk status")
		return
	}
	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error flatten disk")
		return
	}
	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDisk")
}
// Update updates the resource and sets the updated Terraform state on success.
// Only plan/state differences are applied, each as its own API call:
// resize (size_max), rename (disk_name), io limits (iotune) and
// share/unshare (shareable).
func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()})
	// Retrieve values from state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"updateTimeout": updateTimeout})
	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()
	// Checking if inputs are valid
	tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()})
	resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()})
	// The numeric disk ID is parsed from the state's string ID.
	diskId, err := strconv.Atoi(state.Id.ValueString())
	if err != nil {
		resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error())
		return
	}
	// resize disk
	if !plan.SizeMax.Equal(state.SizeMax) {
		resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error resizing disk")
			return
		}
	}
	// rename disk
	if !plan.DiskName.Equal(state.DiskName) {
		resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error renaming disk")
			return
		}
	}
	// change io limits
	if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk")
			return
		}
	}
	// share/unshare disk
	if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() {
		resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare")
			return
		}
	}
	tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()})
	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Delete deletes the resource and removes the Terraform state on success.
// Detach/Permanently/Reason from state are forwarded to the platform's
// delete call; Detach and Permanently default to false.
func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
	// Set timeouts
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"deleteTimeout": deleteTimeout})
	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()
	// Delete existing resource group
	delReq := disks.DeleteRequest{
		DiskID:      uint64(state.DiskId.ValueInt64()),
		Detach:      state.Detach.ValueBool(),      // default false
		Permanently: state.Permanently.ValueBool(), // default false
	}
	if !state.Reason.IsNull() {
		delReq.Reason = state.Reason.ValueString()
	}
	tflog.Info(ctx, "Delete resourceDisk: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq})
	_, err := r.client.CloudAPI().Disks().Delete(ctx, delReq)
	if err != nil {
		resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk with error: ", err.Error())
		return
	}
	tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": state.Id.ValueString()})
}
// Schema defines the schema for the resource: attributes come from
// schemas.MakeSchemaResourceDisk, plus a "timeouts" block covering
// create/read/update/delete.
func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDisk(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
		},
	}
}
// Metadata returns the resource type name, derived from the provider type
// name with a "_disk" suffix.
func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const suffix = "_disk"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure adds the provider configured client to the resource.
// Any configuration problem is reported by client.Resource via resp.
func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceDisk")
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceDisk successfully")
}
// ImportState imports an existing disk by passing the import ID straight
// through to the "id" attribute.
func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,326 @@
package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time assertions: a build error here means resourceDiskReplication
// no longer implements the plugin-framework resource contracts.
var (
	_ resource.Resource                = &resourceDiskReplication{}
	_ resource.ResourceWithImportState = &resourceDiskReplication{}
)
// NewResourceDiskReplications returns a fresh, unconfigured disk replication
// resource implementation for registration with the provider; the API client
// is injected later via Configure.
func NewResourceDiskReplications() resource.Resource {
	return new(resourceDiskReplication)
}
// resourceDiskReplication is the disk replication resource implementation.
type resourceDiskReplication struct {
	// client is the decort API client, injected by Configure.
	client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
// Flow: read plan -> apply create timeout -> validate inputs -> call
// Disks().Replicate -> optionally stop replication when the plan explicitly
// sets start = false -> flatten platform state into the plan and save it.
func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create resource group
	var plan models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully")
	tflog.Info(ctx, "Create resourceDiskReplication: start creating")
	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully")
	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()
	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDiskReplication: starting input checks")
	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: input checks successful")
	reqCreate := disks.ReplicateRequest{
		DiskID:   uint64(plan.DiskId.ValueInt64()),
		Name:     plan.Name.ValueString(),
		SepID:    uint64(plan.SepID.ValueInt64()),
		PoolName: plan.PoolName.ValueString(),
	}
	diskReplicaId, err := r.client.CloudAPI().Disks().Replicate(ctx, reqCreate)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDiskReplication: unable to replicate disk",
			err.Error(),
		)
		return
	}
	tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64()))
	// Replication is started by the platform; stop it only when the plan
	// explicitly sets start = false (null/unknown leaves it running).
	start := plan.Start.ValueBool()
	ok := !(plan.Start.IsNull() || plan.Start.IsUnknown())
	if ok && !start {
		tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId))
		reqStop := disks.ReplicationStopRequest{
			DiskID: uint64(plan.DiskId.ValueInt64()),
		}
		_, err = r.client.CloudAPI().Disks().ReplicationStop(ctx, reqStop)
		if err != nil {
			// Fixed diagnostic: the old title said the replication was
			// "stoped" (typo, and it claimed success on the failure path).
			resp.Diagnostics.AddError(
				fmt.Sprintf("resourceDiskReplicationCreate: cannot stop replication between disk with ID: %d and replica with ID: %d", uint64(plan.DiskId.ValueInt64()), diskReplicaId),
				err.Error(),
			)
			return
		}
	}
	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}
// Read refreshes the Terraform state with the latest data.
// Flow: read state -> apply read timeout -> verify disk/replication status
// on the platform -> flatten the platform response over the state and save it.
func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceDiskReplication: got state successfully")
	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":     state.DiskId.ValueInt64(),
		"readTimeout": readTimeout})
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// read status
	resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status")
		return
	}
	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk")
		return
	}
	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDiskReplication")
}
// Update updates the resource and sets the updated Terraform state on success.
// Only plan/state differences are applied: start/stop, pause and reverse are
// each handled by a dedicated utility. The refreshed state (not the plan) is
// flattened and saved, as the utilities mutate state in place.
func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
	// Retrieve values from state
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":       state.DiskId.ValueInt64(),
		"updateTimeout": updateTimeout})
	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()
	// Checking if inputs are valid
	tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()})
	if !plan.Start.Equal(state.Start) {
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop")
			return
		}
	}
	if !plan.Pause.Equal(state.Pause) {
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause")
			return
		}
	}
	if !plan.Reverse.Equal(state.Reverse) {
		// Fixed log message: previously this branch logged
		// "UtilityDiskReplicationUpdatePause" (copy-paste error).
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse")
			return
		}
	}
	tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
	// Overwrite items with refreshed state
	// Fixed log messages below: they previously said "Read"/"End read"
	// although this is the Update path (copy-paste errors).
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error flatten disk")
		return
	}
	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End update resourceDiskReplication")
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Load the current state; it carries everything needed to build the delete request.
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskReplication: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
	// Resolve the delete timeout (300s default) and bound the context with it.
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":       state.DiskId.ValueInt64(),
		"deleteTimeout": deleteTimeout})
	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()
	// Build the platform request; Detach and Permanently default to false,
	// Reason is sent only when set in state.
	deleteRequest := disks.DeleteRequest{
		DiskID:      uint64(state.DiskId.ValueInt64()),
		Detach:      state.Detach.ValueBool(),
		Permanently: state.Permanently.ValueBool(),
	}
	if !state.Reason.IsNull() {
		deleteRequest.Reason = state.Reason.ValueString()
	}
	tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudAPI().Disks().Delete", map[string]any{"req": deleteRequest})
	if _, err := r.client.CloudAPI().Disks().Delete(ctx, deleteRequest); err != nil {
		resp.Diagnostics.AddError("Delete resourceDiskReplication: Error deleting disk with error: ", err.Error())
		return
	}
	tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()})
}
// Schema defines the schema for the resource.
func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	// Standard timeouts block is attached alongside the attribute schema.
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDiskReplication(),
		Blocks:     blocks,
	}
}
// Metadata returns the resource type name.
func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	// Resource type is the provider type name plus a fixed suffix.
	const suffix = "_disk_replication"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure adds the provider configured client to the resource.
func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceDiskReplication")
	// client.Resource resolves the decort client prepared by the provider;
	// presumably it records diagnostics in resp on failure — verify in internal/client.
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceDiskReplication successfully")
}
// ImportState imports an existing resource into Terraform state by its ID.
func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,295 @@
package disks
import (
"context"
"time"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = (*resourceDiskSnapshot)(nil)
	_ resource.ResourceWithImportState = (*resourceDiskSnapshot)(nil)
)
// NewResourceDiskSnapshot is a helper function to simplify the provider implementation.
func NewResourceDiskSnapshot() resource.Resource {
	return new(resourceDiskSnapshot)
}
// resourceDiskSnapshot is the resource implementation.
type resourceDiskSnapshot struct {
	// client is the decort API client injected by Configure.
	client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
// Note: the snapshot itself is not created here — DiskSnapshotCheckPresence
// requires it to already exist on the platform; Create only adopts it (and
// optionally rolls the disk back to it when `rollback` is set).
func (r *resourceDiskSnapshot) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create resource group
	var plan models.ResourceDiskSnapshotModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskSnapshot: Error receiving the plan")
		return
	}
	// Shared log context for this operation.
	ctxCreateSnpsht := map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString(),
	}
	tflog.Info(ctx, "Create resourceDiskSnapshot: got plan successfully", ctxCreateSnpsht)
	tflog.Info(ctx, "Create resourceDiskSnapshot: start creating", ctxCreateSnpsht)
	// Set timeouts (600s default) and bound the context with the result.
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":       plan.DiskID.ValueInt64(),
		"label":         plan.Label.ValueString(),
		"createTimeout": createTimeout})
	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()
	// Check if input values are valid in the platform
	_, diags = utilities.DiskSnapshotCheckPresence(ctx, &plan, r.client)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskSnapshot: disk snapshot does not exist")
		return
	}
	tflog.Info(ctx, "Create resourceDiskSnapshot: snapshot successfully loaded", ctxCreateSnpsht)
	if plan.Rollback.ValueBool() { // default is false
		resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Create resourceDiskSnapshot: Error rollback snapshot")
			return
		}
	}
	tflog.Info(ctx, "Create resourceDiskSnapshot: resource creation is completed", ctxCreateSnpsht)
	// Map response body to schema and populate Computed attribute values.
	// Log on failure for consistency with the other CRUD methods of this resource.
	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskSnapshot: Error flatten disk snapshot")
		return
	}
	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskSnapshot: Error set state")
		return
	}
	tflog.Info(ctx, "End create resourceDiskSnapshot", ctxCreateSnpsht)
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskSnapshot) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Pull the current state; the snapshot is identified by disk ID and label.
	var state models.ResourceDiskSnapshotModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskSnapshot: Error get state")
		return
	}
	logCtx := map[string]any{
		"disk_id": state.DiskID.ValueInt64(),
		"label":   state.Label.ValueString(),
	}
	tflog.Info(ctx, "Read resourceDiskSnapshot: got state successfully", logCtx)
	// Resolve the read timeout (300s default) and bound the context with it.
	readTimeout, tDiags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(tDiags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":     state.DiskID.ValueInt64(),
		"label":       state.Label.ValueString(),
		"readTimeout": readTimeout})
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()
	// Refresh the model from the platform.
	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskSnapshot: Error flatten disk snapshot")
		return
	}
	// Persist the refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskSnapshot: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDiskSnapshot")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskSnapshot) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceDiskSnapshotModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the plan")
		return
	}
	// Shared log context for this operation.
	ctxSnpsht := map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString(),
	}
	tflog.Info(ctx, "Update resourceDiskSnapshot: got plan successfully", ctxSnpsht)
	// Retrieve values from state
	var state models.ResourceDiskSnapshotModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDiskSnapshot: got state successfully", ctxSnpsht)
	// Set timeouts (300s default) and bound the context with the result.
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":       plan.DiskID.ValueInt64(),
		"label":         plan.Label.ValueString(),
		"updateTimeout": updateTimeout})
	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()
	// Roll back only when the rollback flag changed and its new value is true.
	if !plan.Rollback.Equal(state.Rollback) && plan.Rollback.ValueBool() {
		resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskSnapshot: Error rollback snapshot")
			return
		}
	}
	tflog.Info(ctx, "Update resourceDiskSnapshot: disk snapshot update is completed", ctxSnpsht)
	// Map response body to schema and populate Computed attribute values.
	// Log on failure for consistency with the other CRUD methods of this resource.
	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskSnapshot: Error flatten disk snapshot")
		return
	}
	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskSnapshot: Error set state")
		return
	}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskSnapshot) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceDiskSnapshotModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskSnapshot: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskSnapshot: got state successfully", map[string]any{
		"disk_id": state.DiskID.ValueInt64(),
		"label":   state.Label.ValueString()})
	// Set timeouts (300s default) and bound the context with the result.
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":       state.DiskID.ValueInt64(),
		"label":         state.Label.ValueString(),
		"deleteTimeout": deleteTimeout})
	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()
	// Check if input values are valid in the platform
	_, diags = utilities.DiskSnapshotCheckPresence(ctx, &state, r.client)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskSnapshot: disk snapshot does not exist")
		return
	}
	delReq := disks.SnapshotDeleteRequest{
		DiskID: uint64(state.DiskID.ValueInt64()),
		Label:  state.Label.ValueString(),
	}
	tflog.Info(ctx, "Delete resourceDiskSnapshot: before call CloudAPI().Disks().SnapshotDelete", map[string]any{"req": delReq})
	_, err := r.client.CloudAPI().Disks().SnapshotDelete(ctx, delReq)
	if err != nil {
		// Message names the snapshot, not the whole disk (was a copy-paste from the disk resource).
		resp.Diagnostics.AddError("Delete resourceDiskSnapshot: Error deleting disk snapshot with error: ", err.Error())
		return
	}
	// Log the numeric disk ID, consistent with the other log entries of this
	// method (previously the string resource ID was logged under "disk_id").
	tflog.Info(ctx, "End delete resourceDiskSnapshot", map[string]any{
		"disk_id": state.DiskID.ValueInt64(),
		"label":   state.Label.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceDiskSnapshot) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	// Standard timeouts block is attached alongside the attribute schema.
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDiskSnapshot(),
		Blocks:     blocks,
	}
}
// Metadata returns the resource type name.
func (r *resourceDiskSnapshot) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	// Resource type is the provider type name plus a fixed suffix.
	const suffix = "_disk_snapshot"
	resp.TypeName = req.ProviderTypeName + suffix
}
// Configure adds the provider configured client to the resource.
func (r *resourceDiskSnapshot) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceDiskSnapshot")
	// client.Resource resolves the decort client prepared by the provider;
	// presumably it records diagnostics in resp on failure — verify in internal/client.
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceDiskSnapshot successfully")
}
// ImportState imports an existing resource into Terraform state by its ID.
func (r *resourceDiskSnapshot) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,197 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDisk returns the attribute map for the single-disk data
// source: "disk_id" is the only required input, everything else is computed
// from the platform response.
func MakeSchemaDataSourceDisk() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"disk_id": schema.Int64Attribute{
			Required: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"account_name": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.StringAttribute{
			Computed: true,
		},
		// computes this disk is attached to (ID/name pairs)
		"computes": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.StringAttribute{
						Computed: true,
					},
					"compute_name": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"desc": schema.StringAttribute{
			Computed: true,
		},
		"destruction_time": schema.Int64Attribute{
			Computed: true,
		},
		"devicename": schema.StringAttribute{
			Computed: true,
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"image_id": schema.Int64Attribute{
			Computed: true,
		},
		"images": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		// per-disk I/O tuning limits
		"iotune": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"size_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"disk_name": schema.StringAttribute{
			Computed: true,
		},
		"order": schema.Int64Attribute{
			Computed: true,
		},
		"params": schema.StringAttribute{
			Computed: true,
		},
		"parent_id": schema.Int64Attribute{
			Computed: true,
		},
		"pci_slot": schema.Int64Attribute{
			Computed: true,
		},
		"pool": schema.StringAttribute{
			Computed: true,
		},
		"present_to": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"purge_time": schema.Int64Attribute{
			Computed: true,
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"res_name": schema.StringAttribute{
			Computed: true,
		},
		"role": schema.StringAttribute{
			Computed: true,
		},
		"sep_id": schema.Int64Attribute{
			Computed: true,
		},
		"sep_type": schema.StringAttribute{
			Computed: true,
		},
		"shareable": schema.BoolAttribute{
			Computed: true,
		},
		"size_max": schema.Int64Attribute{
			Computed: true,
		},
		"size_used": schema.Float64Attribute{
			Computed: true,
		},
		// snapshots taken of this disk
		"snapshots": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"type": schema.StringAttribute{
			Computed: true,
		},
		"vmid": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,265 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskList returns the attribute map for the disk-list
// data source: optional filter/sort/paging inputs plus a computed "items"
// list and "entry_count".
func MakeSchemaDataSourceDiskList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by ID",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by name",
		},
		"account_name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by account name",
		},
		"disk_max_size": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by max disk size",
		},
		"status": schema.StringAttribute{
			Optional:    true,
			Description: "Find by status",
		},
		"shared": schema.BoolAttribute{
			Optional:    true,
			Description: "Find by shared field",
		},
		"account_id": schema.Int64Attribute{
			Optional:    true,
			Description: "ID of the account the disks belong to",
		},
		"type": schema.StringAttribute{
			Optional:    true,
			Description: "type of the disks",
		},
		"sep_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by sep ID",
		},
		"pool_name": schema.StringAttribute{
			Optional:    true,
			Description: "find by pool name",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "Page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "Page size",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		// one entry per disk returned by the platform
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"acl": schema.StringAttribute{
						Computed: true,
					},
					// computes this disk is attached to (ID/name pairs)
					"computes": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"compute_id": schema.StringAttribute{
									Computed: true,
								},
								"compute_name": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"destruction_time": schema.Int64Attribute{
						Computed: true,
					},
					"devicename": schema.StringAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"disk_id": schema.Int64Attribute{
						Computed: true,
					},
					"image_id": schema.Int64Attribute{
						Computed: true,
					},
					"images": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					// per-disk I/O tuning limits
					"iotune": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"read_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"size_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
						},
					},
					"machine_id": schema.Int64Attribute{
						Computed: true,
					},
					"machine_name": schema.StringAttribute{
						Computed: true,
					},
					"disk_name": schema.StringAttribute{
						Computed: true,
					},
					"order": schema.Int64Attribute{
						Computed: true,
					},
					"params": schema.StringAttribute{
						Computed: true,
					},
					"parent_id": schema.Int64Attribute{
						Computed: true,
					},
					"pci_slot": schema.Int64Attribute{
						Computed: true,
					},
					"pool": schema.StringAttribute{
						Computed: true,
					},
					"present_to": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"purge_time": schema.Int64Attribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"res_name": schema.StringAttribute{
						Computed: true,
					},
					"role": schema.StringAttribute{
						Computed: true,
					},
					"sep_id": schema.Int64Attribute{
						Computed: true,
					},
					"sep_type": schema.StringAttribute{
						Computed: true,
					},
					"shareable": schema.BoolAttribute{
						Computed: true,
					},
					"size_max": schema.Int64Attribute{
						Computed: true,
					},
					"size_used": schema.Float64Attribute{
						Computed: true,
					},
					// snapshots taken of this disk
					"snapshots": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"label": schema.StringAttribute{
									Computed: true,
								},
								"res_id": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_guid": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_time": schema.Int64Attribute{
									Computed: true,
								},
								"timestamp": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"tech_status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"vmid": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,253 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskListDeleted returns the attribute map for the
// deleted-disks list data source. Same item shape as the disk list, but
// without the "status", "sep_id" and "pool_name" filters.
func MakeSchemaDataSourceDiskListDeleted() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by ID",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by name",
		},
		"account_name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by account name",
		},
		"disk_max_size": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by max disk size",
		},
		"shared": schema.BoolAttribute{
			Optional:    true,
			Description: "Find by shared field",
		},
		"account_id": schema.Int64Attribute{
			Optional:    true,
			Description: "ID of the account the disks belong to",
		},
		"type": schema.StringAttribute{
			Optional:    true,
			Description: "type of the disks",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "Page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "Page size",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		// one entry per deleted disk returned by the platform
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"acl": schema.StringAttribute{
						Computed: true,
					},
					// computes this disk was attached to (ID/name pairs)
					"computes": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"compute_id": schema.StringAttribute{
									Computed: true,
								},
								"compute_name": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"destruction_time": schema.Int64Attribute{
						Computed: true,
					},
					"devicename": schema.StringAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"disk_id": schema.Int64Attribute{
						Computed: true,
					},
					"image_id": schema.Int64Attribute{
						Computed: true,
					},
					"images": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					// per-disk I/O tuning limits
					"iotune": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"read_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"size_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
						},
					},
					"machine_id": schema.Int64Attribute{
						Computed: true,
					},
					"machine_name": schema.StringAttribute{
						Computed: true,
					},
					"disk_name": schema.StringAttribute{
						Computed: true,
					},
					"order": schema.Int64Attribute{
						Computed: true,
					},
					"params": schema.StringAttribute{
						Computed: true,
					},
					"parent_id": schema.Int64Attribute{
						Computed: true,
					},
					"pci_slot": schema.Int64Attribute{
						Computed: true,
					},
					"pool": schema.StringAttribute{
						Computed: true,
					},
					"present_to": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"purge_time": schema.Int64Attribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"res_name": schema.StringAttribute{
						Computed: true,
					},
					"role": schema.StringAttribute{
						Computed: true,
					},
					"sep_id": schema.Int64Attribute{
						Computed: true,
					},
					"sep_type": schema.StringAttribute{
						Computed: true,
					},
					"shareable": schema.BoolAttribute{
						Computed: true,
					},
					"size_max": schema.Int64Attribute{
						Computed: true,
					},
					"size_used": schema.Float64Attribute{
						Computed: true,
					},
					// snapshots taken of this disk
					"snapshots": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"label": schema.StringAttribute{
									Computed: true,
								},
								"res_id": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_guid": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_time": schema.Int64Attribute{
									Computed: true,
								},
								"timestamp": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"tech_status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"vmid": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,36 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskListTypes returns the attribute map for the
// disk-types list data source: optional sort/paging inputs plus computed results.
func MakeSchemaDataSourceDiskListTypes() map[string]schema.Attribute {
	attrs := make(map[string]schema.Attribute)

	// optional attributes
	attrs["sort_by"] = schema.StringAttribute{
		Optional:    true,
		Description: "sort by one of supported fields, format +|-(field)",
	}
	attrs["page"] = schema.Int64Attribute{
		Optional:    true,
		Description: "Page number",
	}
	attrs["size"] = schema.Int64Attribute{
		Optional:    true,
		Description: "Page size",
	}

	// computed attributes
	attrs["id"] = schema.StringAttribute{
		Computed: true,
	}
	attrs["types"] = schema.ListAttribute{
		Computed:    true,
		ElementType: types.StringType,
	}
	attrs["entry_count"] = schema.Int64Attribute{
		Computed: true,
	}
	return attrs
}

View File

@@ -0,0 +1,62 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskListTypesDetailed returns the attribute map for the
// detailed disk-types list data source (types grouped by SEP and pool).
func MakeSchemaDataSourceDiskListTypesDetailed() map[string]schema.Attribute {
	// Nested object describing one storage pool and the disk types it offers.
	poolObject := schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"name": schema.StringAttribute{
				Computed: true,
			},
			"system": schema.StringAttribute{
				Computed: true,
			},
			"types": schema.ListAttribute{
				Computed:    true,
				ElementType: types.StringType,
			},
		},
	}
	// Nested object describing one SEP together with its pools.
	itemObject := schema.NestedAttributeObject{
		Attributes: map[string]schema.Attribute{
			"pools": schema.ListNestedAttribute{
				Computed:     true,
				NestedObject: poolObject,
			},
			"sep_id": schema.Int64Attribute{
				Computed: true,
			},
			"sep_name": schema.StringAttribute{
				Computed: true,
			},
		},
	}
	return map[string]schema.Attribute{
		// optional attributes
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "Page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "Page size",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed:     true,
			NestedObject: itemObject,
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,265 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskListUnattached returns the Terraform attribute schema
// for the data source that lists unattached disks: optional filter/pagination
// inputs plus the computed result list ("items") and its total ("entry_count").
func MakeSchemaDataSourceDiskListUnattached() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by ID",
		},
		"account_name": schema.StringAttribute{
			Optional:    true,
			Description: "Find by account name",
		},
		"disk_max_size": schema.Int64Attribute{
			Optional:    true,
			Description: "Find by max disk size",
		},
		"status": schema.StringAttribute{
			Optional:    true,
			Description: "Find by status",
		},
		"account_id": schema.Int64Attribute{
			Optional:    true,
			Description: "ID of the account the disks belong to",
		},
		"sep_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by sep ID",
		},
		"pool_name": schema.StringAttribute{
			Optional:    true,
			Description: "find by pool name",
		},
		"type": schema.StringAttribute{
			Optional:    true,
			Description: "type of the disks",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "Page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "Page size",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		// one element per unattached disk matching the filters above
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"ckey": schema.StringAttribute{
						Computed: true,
					},
					"meta": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"acl": schema.StringAttribute{
						Computed: true,
					},
					"boot_partition": schema.Int64Attribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"destruction_time": schema.Int64Attribute{
						Computed: true,
					},
					"disk_path": schema.StringAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"disk_id": schema.Int64Attribute{
						Computed: true,
					},
					"image_id": schema.Int64Attribute{
						Computed: true,
					},
					"images": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					// per-disk I/O limits reported by the platform
					"iotune": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"read_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"read_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"size_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"total_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_bytes_sec_max": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec": schema.Int64Attribute{
								Computed: true,
							},
							"write_iops_sec_max": schema.Int64Attribute{
								Computed: true,
							},
						},
					},
					"iqn": schema.StringAttribute{
						Computed: true,
					},
					"login": schema.StringAttribute{
						Computed: true,
					},
					"milestones": schema.Int64Attribute{
						Computed: true,
					},
					"disk_name": schema.StringAttribute{
						Computed: true,
					},
					"order": schema.Int64Attribute{
						Computed: true,
					},
					"params": schema.StringAttribute{
						Computed: true,
					},
					"parent_id": schema.Int64Attribute{
						Computed: true,
					},
					"passwd": schema.StringAttribute{
						Computed: true,
					},
					"pci_slot": schema.Int64Attribute{
						Computed: true,
					},
					"pool": schema.StringAttribute{
						Computed: true,
					},
					"purge_attempts": schema.Int64Attribute{
						Computed: true,
					},
					"purge_time": schema.Int64Attribute{
						Computed: true,
					},
					"reality_device_number": schema.Int64Attribute{
						Computed: true,
					},
					"reference_id": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"res_name": schema.StringAttribute{
						Computed: true,
					},
					"role": schema.StringAttribute{
						Computed: true,
					},
					"sep_id": schema.Int64Attribute{
						Computed: true,
					},
					"shareable": schema.BoolAttribute{
						Computed: true,
					},
					"size_max": schema.Int64Attribute{
						Computed: true,
					},
					"size_used": schema.Float64Attribute{
						Computed: true,
					},
					// snapshots taken of this disk
					"snapshots": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"label": schema.StringAttribute{
									Computed: true,
								},
								"res_id": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_guid": schema.StringAttribute{
									Computed: true,
								},
								"snap_set_time": schema.Int64Attribute{
									Computed: true,
								},
								"timestamp": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"tech_status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"vmid": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,225 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDiskReplication returns the Terraform attribute schema
// for the disk replication data source: the required primary/secondary disk
// IDs plus the computed replication status and the full disk record fields.
func MakeSchemaDataSourceDiskReplication() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		"disk_id": schema.Int64Attribute{
			Required:    true,
			Description: "Id of primary disk",
		},
		"replica_disk_id": schema.Int64Attribute{
			Required:    true,
			Description: "Id of secondary disk",
		},
		"status_replication": schema.StringAttribute{
			Computed:    true,
			Description: "Status of replication",
		},
		"account_id": schema.Int64Attribute{
			Computed:    true,
			Description: "The unique ID of the subscriber-owner of the disk",
		},
		"account_name": schema.StringAttribute{
			Computed: true,
			// Fixed malformed quoting in the original description:
			// "subscriber '(account')" -> "subscriber ('account')".
			Description: "The name of the subscriber ('account') to whom this disk belongs",
		},
		"acl": schema.StringAttribute{
			Computed: true,
		},
		// computes this disk is attached to
		"computes": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.StringAttribute{
						Computed: true,
					},
					"compute_name": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"desc": schema.StringAttribute{
			Computed: true,
		},
		"destruction_time": schema.Int64Attribute{
			Computed: true,
		},
		"devicename": schema.StringAttribute{
			Computed: true,
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"image_id": schema.Int64Attribute{
			Computed: true,
		},
		"images": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		// per-disk I/O limits reported by the platform
		"iotune": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"size_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"disk_name": schema.StringAttribute{
			Computed: true,
		},
		"order": schema.Int64Attribute{
			Computed: true,
		},
		"params": schema.StringAttribute{
			Computed: true,
		},
		"parent_id": schema.Int64Attribute{
			Computed: true,
		},
		"pci_slot": schema.Int64Attribute{
			Computed: true,
		},
		"pool": schema.StringAttribute{
			Computed: true,
		},
		"present_to": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"purge_time": schema.Int64Attribute{
			Computed: true,
		},
		// replication pairing details for this disk
		"replication": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"disk_id": schema.Int64Attribute{
					Computed: true,
				},
				"pool_id": schema.StringAttribute{
					Computed: true,
				},
				"role": schema.StringAttribute{
					Computed: true,
				},
				"self_volume_id": schema.StringAttribute{
					Computed: true,
				},
				"storage_id": schema.StringAttribute{
					Computed: true,
				},
				"volume_id": schema.StringAttribute{
					Computed: true,
				},
			},
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"res_name": schema.StringAttribute{
			Computed: true,
		},
		"role": schema.StringAttribute{
			Computed: true,
		},
		"sep_id": schema.Int64Attribute{
			Computed: true,
		},
		"sep_type": schema.StringAttribute{
			Computed: true,
		},
		"shareable": schema.BoolAttribute{
			Computed: true,
		},
		"size_max": schema.Int64Attribute{
			Computed: true,
		},
		"size_used": schema.Float64Attribute{
			Computed: true,
		},
		// snapshots taken of this disk
		"snapshots": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"type": schema.StringAttribute{
			Computed: true,
		},
		"vmid": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,39 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceDiskSnapshot returns the Terraform attribute schema for
// the disk snapshot data source: the disk ID and snapshot label identify the
// snapshot; the remaining attributes are computed from the platform response.
func MakeSchemaDataSourceDiskSnapshot() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"disk_id": schema.Int64Attribute{
			Required: true,
			// Fixed copy-pasted account description: this attribute is the
			// ID of the disk the snapshot belongs to, not of the subscriber.
			Description: "The unique ID of the disk the snapshot belongs to",
		},
		"label": schema.StringAttribute{
			Required:    true,
			Description: "Name of the snapshot",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"guid": schema.StringAttribute{
			Computed: true,
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"snap_set_guid": schema.StringAttribute{
			Computed: true,
		},
		"snap_set_time": schema.Int64Attribute{
			Computed: true,
		},
		"timestamp": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,45 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceDiskSnapshotList returns the Terraform attribute schema
// for the data source listing all snapshots of a single disk.
func MakeSchemaDataSourceDiskSnapshotList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"disk_id": schema.Int64Attribute{
			Required: true,
			// Fixed copy-pasted account description: this attribute is the
			// ID of the disk whose snapshots are listed, not of the subscriber.
			Description: "The unique ID of the disk whose snapshots are listed",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		// one element per snapshot of the disk
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
	}
}

View File

@@ -0,0 +1,269 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaResourceDisk returns the Terraform attribute schema for the disk
// resource: required creation parameters, optional tuning/lifecycle flags and
// the computed attributes reflected back from the platform.
func MakeSchemaResourceDisk() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"account_id": schema.Int64Attribute{
			Required:    true,
			Description: "ID of the account",
		},
		"disk_name": schema.StringAttribute{
			Required: true,
			// Fixed typo in the original description ("Iname of disk").
			Description: "Name of disk",
		},
		"size_max": schema.Int64Attribute{
			Required:    true,
			Description: "size in GB, default is 10",
		},
		"gid": schema.Int64Attribute{
			Required:    true,
			Description: "ID of the grid (platform)",
		},
		// optional attributes
		"desc": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "description of disk",
		},
		"pool": schema.StringAttribute{
			Optional:    true,
			Computed:    true,
			Description: "Pool for disk location",
		},
		"sep_id": schema.Int64Attribute{
			Optional:    true,
			Computed:    true,
			Description: "Storage endpoint provider ID to create disk",
		},
		"type": schema.StringAttribute{
			Optional: true,
			Computed: true,
			Validators: []validator.String{
				stringvalidator.OneOf("B", "D", "T"), // case is not ignored
			},
			Description: "(B;D;T) B=Boot;D=Data;T=Temp",
			// default is D
		},
		"detach": schema.BoolAttribute{
			Optional:    true,
			Description: "Detaching the disk from compute",
			// default is false
		},
		"permanently": schema.BoolAttribute{
			Optional:    true,
			Description: "Whether to completely delete the disk, works only with non attached disks",
			// default is false
		},
		"reason": schema.StringAttribute{
			Optional:    true,
			Description: "Reason for deletion",
		},
		"shareable": schema.BoolAttribute{
			Optional:    true,
			Computed:    true,
			Description: "share disk",
		},
		// per-disk I/O limits; each field may be set by the user or
		// reported back by the platform
		"iotune": schema.SingleNestedAttribute{
			Optional: true,
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Number of bytes to read per second",
				},
				"read_bytes_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum number of bytes to read",
				},
				"read_iops_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Number of io read operations per second",
				},
				"read_iops_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum number of io read operations",
				},
				"size_iops_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Size of io operations",
				},
				"total_bytes_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Total size bytes per second",
				},
				"total_bytes_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum total size of bytes per second",
				},
				"total_iops_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Total number of io operations per second",
				},
				"total_iops_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum total number of io operations per second",
				},
				"write_bytes_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Number of bytes to write per second",
				},
				"write_bytes_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum number of bytes to write per second",
				},
				"write_iops_sec": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Number of write operations per second",
				},
				"write_iops_sec_max": schema.Int64Attribute{
					Optional:    true,
					Computed:    true,
					Description: "Maximum number of write operations per second",
				},
			},
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"disk_id": schema.Int64Attribute{
			Computed: true,
		},
		"account_name": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.StringAttribute{
			Computed: true,
		},
		// computes this disk is attached to
		"computes": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.StringAttribute{
						Computed: true,
					},
					"compute_name": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"destruction_time": schema.Int64Attribute{
			Computed: true,
		},
		"devicename": schema.StringAttribute{
			Computed: true,
		},
		"image_id": schema.Int64Attribute{
			Computed: true,
		},
		"images": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		"last_updated": schema.StringAttribute{
			Computed:    true,
			Description: "Timestamp of the last Terraform update of the disk resource.",
		},
		"order": schema.Int64Attribute{
			Computed: true,
		},
		"params": schema.StringAttribute{
			Computed: true,
		},
		"parent_id": schema.Int64Attribute{
			Computed: true,
		},
		"pci_slot": schema.Int64Attribute{
			Computed: true,
		},
		"present_to": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"purge_time": schema.Int64Attribute{
			Computed: true,
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"res_name": schema.StringAttribute{
			Computed: true,
		},
		"role": schema.StringAttribute{
			Computed: true,
		},
		"sep_type": schema.StringAttribute{
			Computed: true,
		},
		"size_used": schema.Float64Attribute{
			Computed: true,
		},
		// snapshots taken of this disk
		"snapshots": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"vmid": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,253 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaResourceDiskReplication returns the Terraform attribute schema for
// the disk replication resource: required identification of the primary disk
// and replica placement, optional control flags, and computed disk record
// attributes reflected back from the platform.
func MakeSchemaResourceDiskReplication() map[string]schema.Attribute {
	// required attributes
	return map[string]schema.Attribute{
		"disk_id": schema.Int64Attribute{
			Required: true,
		},
		"disk_name": schema.StringAttribute{
			Required: true,
		},
		"sep_id": schema.Int64Attribute{
			Required: true,
		},
		"pool_name": schema.StringAttribute{
			Required: true,
		},
		// optional attributes
		"pause": schema.BoolAttribute{
			Optional: true,
		},
		"reverse": schema.BoolAttribute{
			Optional: true,
		},
		"start": schema.BoolAttribute{
			Optional: true,
		},
		"detach": schema.BoolAttribute{
			Optional: true,
		},
		"permanently": schema.BoolAttribute{
			Optional: true,
		},
		"reason": schema.StringAttribute{
			Optional: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"replica_disk_id": schema.Int64Attribute{
			Computed: true,
		},
		"status_replication": schema.StringAttribute{
			Computed: true,
		},
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"account_name": schema.StringAttribute{
			Computed: true,
		},
		"acl": schema.StringAttribute{
			Computed: true,
		},
		// computes this disk is attached to
		"computes": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"compute_id": schema.StringAttribute{
						Computed: true,
					},
					"compute_name": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"desc": schema.StringAttribute{
			Computed: true,
		},
		"destruction_time": schema.Int64Attribute{
			Computed: true,
		},
		"devicename": schema.StringAttribute{
			Computed: true,
		},
		"gid": schema.Int64Attribute{
			Computed: true,
		},
		"image_id": schema.Int64Attribute{
			Computed: true,
		},
		"images": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		// per-disk I/O limits reported by the platform
		"iotune": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"read_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"size_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"total_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_bytes_sec_max": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec": schema.Int64Attribute{
					Computed: true,
				},
				"write_iops_sec_max": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"order": schema.Int64Attribute{
			Computed: true,
		},
		"params": schema.StringAttribute{
			Computed: true,
		},
		"parent_id": schema.Int64Attribute{
			Computed: true,
		},
		"pci_slot": schema.Int64Attribute{
			Computed: true,
		},
		"pool": schema.StringAttribute{
			Computed: true,
		},
		"present_to": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"purge_time": schema.Int64Attribute{
			Computed: true,
		},
		// replication pairing details for this disk
		"replication": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"disk_id": schema.Int64Attribute{
					Computed: true,
				},
				"pool_id": schema.StringAttribute{
					Computed: true,
				},
				"role": schema.StringAttribute{
					Computed: true,
				},
				"self_volume_id": schema.StringAttribute{
					Computed: true,
				},
				"storage_id": schema.StringAttribute{
					Computed: true,
				},
				"volume_id": schema.StringAttribute{
					Computed: true,
				},
			},
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"res_name": schema.StringAttribute{
			Computed: true,
		},
		"role": schema.StringAttribute{
			Computed: true,
		},
		"sep_type": schema.StringAttribute{
			Computed: true,
		},
		"shareable": schema.BoolAttribute{
			Computed: true,
		},
		"size_max": schema.Int64Attribute{
			Computed: true,
		},
		"size_used": schema.Float64Attribute{
			Computed: true,
		},
		// snapshots taken of this disk
		"snapshots": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"label": schema.StringAttribute{
						Computed: true,
					},
					"res_id": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_guid": schema.StringAttribute{
						Computed: true,
					},
					"snap_set_time": schema.Int64Attribute{
						Computed: true,
					},
					"timestamp": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"tech_status": schema.StringAttribute{
			Computed: true,
		},
		"type": schema.StringAttribute{
			Computed: true,
		},
		"vmid": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,58 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)
// MakeSchemaResourceDiskSnapshot returns the Terraform attribute schema for
// the disk snapshot resource. The snapshot is identified by the pair
// (disk_id, label); "rollback" optionally restores the disk to the snapshot.
func MakeSchemaResourceDiskSnapshot() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"disk_id": schema.Int64Attribute{
			Required: true,
			// Fixed copy-pasted account description: this attribute is the
			// ID of the disk the snapshot belongs to, not of the subscriber.
			Description: "The unique ID of the disk the snapshot belongs to",
		},
		"label": schema.StringAttribute{
			Required:    true,
			Description: "Name of the snapshot",
		},
		// optional attributes
		"rollback": schema.BoolAttribute{
			Optional:    true,
			Description: "Needed in order to make a snapshot rollback",
			// default is false
		},
		"timestamp": schema.Int64Attribute{
			Optional:    true,
			Computed:    true,
			Description: "Snapshot time",
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
			// id is generated as "<disk_id>#<label>"
		},
		"last_updated": schema.StringAttribute{
			Computed:    true,
			Description: "Timestamp of the last Terraform update of the disk resource.",
		},
		"guid": schema.StringAttribute{
			Computed: true,
		},
		"res_id": schema.StringAttribute{
			Computed: true,
		},
		"snap_set_guid": schema.StringAttribute{
			Computed: true,
		},
		"snap_set_time": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,25 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
)
// DataSourceDiskCheckPresence fetches the record of the disk with the given ID
// via CloudAPI().Disks().Get. On failure it returns an error wrapped with the
// disk ID so the caller's diagnostics identify which disk could not be read.
func DataSourceDiskCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.RecordDisk, error) {
	tflog.Info(ctx, fmt.Sprintf("DataSourceDiskCheckPresence: Get info about disk with ID - %v", diskId))
	recordDisk, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		// Include the disk ID in the wrapped error (the original message
		// did not say which disk failed).
		return nil, fmt.Errorf("cannot get info about disk with ID %d: %w", diskId, err)
	}
	tflog.Info(ctx, "DataSourceDiskCheckPresence: response from CloudAPI().Disks().Get", map[string]any{
		"disk_id":  diskId,
		"response": recordDisk})
	// Return an explicit nil on success rather than re-returning err.
	return recordDisk, nil
}

View File

@@ -0,0 +1,72 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskListCheckPresence builds a disks.ListRequest from the optional
// filter/pagination fields set in state (null fields are left at their zero
// value in the request) and calls CloudAPI().Disks().List. It returns the
// resulting list, or diagnostics describing the API failure.
func DataSourceDiskListCheckPresence(ctx context.Context, state *models.DataSourceDiskListModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
	diags := diag.Diagnostics{}
	listReq := disks.ListRequest{}
	// Copy only the filters the user actually set into the request.
	if !state.ByID.IsNull() {
		listReq.ByID = uint64(state.ByID.ValueInt64())
	}
	if !state.Name.IsNull() {
		listReq.Name = state.Name.ValueString()
	}
	if !state.AccountName.IsNull() {
		listReq.AccountName = state.AccountName.ValueString()
	}
	if !state.DiskMaxSize.IsNull() {
		listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
	}
	if !state.Status.IsNull() {
		listReq.Status = state.Status.ValueString()
	}
	if !state.Shared.IsNull() {
		listReq.Shared = state.Shared.ValueBool()
	}
	if !state.AccountID.IsNull() {
		listReq.AccountID = uint64(state.AccountID.ValueInt64())
	}
	if !state.Type.IsNull() {
		listReq.Type = state.Type.ValueString()
	}
	if !state.SEPID.IsNull() {
		listReq.SEPID = uint64(state.SEPID.ValueInt64())
	}
	if !state.PoolName.IsNull() {
		listReq.Pool = state.PoolName.ValueString()
	}
	if !state.SortBy.IsNull() {
		listReq.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		listReq.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		listReq.Size = uint64(state.Size.ValueInt64())
	}
	tflog.Info(ctx, "DataSourceDiskListCheckPresence: before call CloudAPI().Disks().List", map[string]any{
		"req": listReq,
	})
	diskList, err := c.CloudAPI().Disks().List(ctx, listReq)
	if err != nil {
		diags.AddError("Cannot get info about disk list", err.Error())
		return nil, diags
	}
	tflog.Info(ctx, "DataSourceDiskListCheckPresence: got list successfully", map[string]any{
		"entry_count": diskList.EntryCount,
	})
	return diskList, nil
}

View File

@@ -0,0 +1,63 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskListDeletedCheckPresence builds a disks.ListDeletedRequest
// from the optional filter/pagination fields set in state (null fields are
// left at their zero value) and calls CloudAPI().Disks().ListDeleted. It
// returns the resulting list of deleted disks, or diagnostics on API failure.
func DataSourceDiskListDeletedCheckPresence(ctx context.Context, state *models.DataSourceDiskListDeletedModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
	diags := diag.Diagnostics{}
	listReq := disks.ListDeletedRequest{}
	// Copy only the filters the user actually set into the request.
	if !state.ByID.IsNull() {
		listReq.ByID = uint64(state.ByID.ValueInt64())
	}
	if !state.Name.IsNull() {
		listReq.Name = state.Name.ValueString()
	}
	if !state.AccountName.IsNull() {
		listReq.AccountName = state.AccountName.ValueString()
	}
	if !state.DiskMaxSize.IsNull() {
		listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
	}
	if !state.Shared.IsNull() {
		listReq.Shared = state.Shared.ValueBool()
	}
	if !state.AccountID.IsNull() {
		listReq.AccountID = uint64(state.AccountID.ValueInt64())
	}
	if !state.Type.IsNull() {
		listReq.Type = state.Type.ValueString()
	}
	if !state.SortBy.IsNull() {
		listReq.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		listReq.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		listReq.Size = uint64(state.Size.ValueInt64())
	}
	tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: before call CloudAPI().Disks().ListDeleted", map[string]any{
		"req": listReq,
	})
	diskList, err := c.CloudAPI().Disks().ListDeleted(ctx, listReq)
	if err != nil {
		diags.AddError("Cannot get info about disk list", err.Error())
		return nil, diags
	}
	tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: got list successfully", map[string]any{
		"entry_count": diskList.EntryCount,
	})
	return diskList, nil
}

View File

@@ -0,0 +1,42 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskListTypesCheckPresence queries CloudAPI().Disks().ListTypes
// in non-detailed mode, applying the optional sort/pagination fields from
// state. It returns the list of disk types, or diagnostics on API failure.
func DataSourceDiskListTypesCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
	var diags diag.Diagnostics

	// Start from a non-detailed request and fill in only the fields the
	// user actually set.
	req := disks.ListTypesRequest{Detailed: false}
	if !state.SortBy.IsNull() {
		req.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		req.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		req.Size = uint64(state.Size.ValueInt64())
	}

	tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: before call CloudAPI().Disks().ListTypes", map[string]any{
		"req": req,
	})
	res, err := c.CloudAPI().Disks().ListTypes(ctx, req)
	if err != nil {
		diags.AddError("Cannot get info about disk list types", err.Error())
		return nil, diags
	}

	tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: got list successfully", map[string]any{
		"entry_count": res.EntryCount,
	})
	return res, nil
}

View File

@@ -0,0 +1,42 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskListTypesDetailedCheckPresence queries
// CloudAPI().Disks().ListTypes in detailed mode, applying the optional
// sort/pagination fields from state. It returns the detailed list of disk
// types, or diagnostics on API failure.
func DataSourceDiskListTypesDetailedCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
	var diags diag.Diagnostics

	// Start from a detailed request and fill in only the fields the user
	// actually set.
	req := disks.ListTypesRequest{Detailed: true}
	if !state.SortBy.IsNull() {
		req.SortBy = state.SortBy.ValueString()
	}
	if !state.Page.IsNull() {
		req.Page = uint64(state.Page.ValueInt64())
	}
	if !state.Size.IsNull() {
		req.Size = uint64(state.Size.ValueInt64())
	}

	tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: before call CloudAPI().Disks().ListTypes", map[string]any{
		"req": req,
	})
	res, err := c.CloudAPI().Disks().ListTypes(ctx, req)
	if err != nil {
		diags.AddError("Cannot get info about disk list types", err.Error())
		return nil, diags
	}

	tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: got list successfully", map[string]any{
		"entry_count": res.EntryCount,
	})
	return res, nil
}

View File

@@ -0,0 +1,66 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
func DataSourceDiskListUnattachedCheckPresence(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) (*disks.ListDisksUnattached, diag.Diagnostics) {
diags := diag.Diagnostics{}
listReq := disks.ListUnattachedRequest{}
if !state.ByID.IsNull() {
listReq.ByID = uint64(state.ByID.ValueInt64())
}
if !state.AccountName.IsNull() {
listReq.AccountName = state.AccountName.ValueString()
}
if !state.DiskMaxSize.IsNull() {
listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
}
if !state.Status.IsNull() {
listReq.Status = state.Status.ValueString()
}
if !state.AccountID.IsNull() {
listReq.AccountID = uint64(state.AccountID.ValueInt64())
}
if !state.SepID.IsNull() {
listReq.SEPID = uint64(state.SepID.ValueInt64())
}
if !state.PoolName.IsNull() {
listReq.Pool = state.PoolName.ValueString()
}
if !state.Type.IsNull() {
listReq.Type = state.Type.ValueString()
}
if !state.SortBy.IsNull() {
listReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: before call CloudAPI().Disks().ListUnattached", map[string]any{
"req": listReq,
})
diskList, err := c.CloudAPI().Disks().ListUnattached(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about disk list", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: got list successfully", map[string]any{
"entry_count": diskList.EntryCount,
})
return diskList, nil
}

View File

@@ -0,0 +1,34 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskReplicationCheckPresence fetches the replication status of the
// disk referenced by state.DiskId and then loads the disk record itself,
// falling back to state.ID when DiskId is null or unknown.
func DataSourceDiskReplicationCheckPresence(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
	statusReq := disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())}
	status, err := c.CloudAPI().Disks().ReplicationStatus(ctx, statusReq)
	if err != nil {
		return nil, nil, err
	}

	// Prefer the explicit DiskId; imported resources may only carry ID.
	getReq := disks.GetRequest{DiskID: uint64(state.ID.ValueInt64())}
	if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
		getReq.DiskID = uint64(state.DiskId.ValueInt64())
	}

	tflog.Info(ctx, "DataSourceDiskReplicationCheckPresence: load disk")
	disk, err := c.CloudAPI().Disks().Get(ctx, getReq)
	if err != nil {
		return nil, nil, err
	}
	return disk, &status, nil
}

View File

@@ -0,0 +1,43 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DataSourceDiskSnapshotCheckPresence loads the disk identified by plan.DiskID and
// scans its snapshot list for an entry whose label equals plan.Label.
// Returns diagnostics if the disk cannot be loaded or no matching snapshot exists.
func DataSourceDiskSnapshotCheckPresence(ctx context.Context, plan *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
	var diags diag.Diagnostics

	diskId := uint64(plan.DiskID.ValueInt64())
	label := plan.Label.ValueString()
	tflog.Info(ctx, "Start DataSourceDiskSnapshotCheckPresence", map[string]any{"disk_id": diskId, "label": label})

	tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: before call CloudAPI().Disks().Get", map[string]any{"disk_id": diskId})
	disk, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		diags.AddError(
			fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
			err.Error(),
		)
		return nil, diags
	}
	tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: response from CloudAPI().Disks().Get", map[string]any{"response": disk})

	// Return a copy of the first snapshot whose label matches.
	for i := range disk.Snapshots {
		if disk.Snapshots[i].Label == label {
			snapshot := disk.Snapshots[i]
			return &snapshot, nil
		}
	}

	diags.AddError(
		"Snapshot not found",
		fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
	)
	return nil, diags
}

View File

@@ -0,0 +1,25 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
)
// DiskSnapshotListCheckPresence loads the disk with the given ID and returns a
// pointer to its snapshot list.
//
// Returns a wrapped error if the disk record itself cannot be retrieved.
func DiskSnapshotListCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.ListSnapshots, error) {
	tflog.Info(ctx, fmt.Sprintf("DiskSnapshotListCheckPresence: Get info about disk snapshot list with disk ID - %v", diskId))
	recordDisk, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
	}
	tflog.Info(ctx, "DiskSnapshotListCheckPresence: response from CloudAPI().Disks().Get", map[string]any{
		"disk_id":  diskId,
		"response": recordDisk})
	// err is guaranteed nil here; return the literal instead of the stale variable.
	return &recordDisk.Snapshots, nil
}

View File

@@ -0,0 +1,373 @@
package utilities
import (
"context"
"fmt"
"strconv"
"time"
"github.com/hashicorp/terraform-plugin-framework/types"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DiskCheckPresence loads the disk record with the given ID from the platform,
// confirming the disk exists.
//
// Returns a wrapped error if the Get call fails.
func DiskCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.RecordDisk, error) {
	tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskId))
	diskRecord, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
	}
	tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudAPI().Disks().Get", map[string]any{"disk_id": diskId, "response": diskRecord})
	// err is guaranteed nil here; return the literal instead of the stale variable.
	return diskRecord, nil
}
// CreateRequestResourceDisk translates the Terraform plan into a disks.CreateRequest.
// Required fields (account, name, size, grid) are always set; optional fields are
// copied only when the plan carries a known value. The disk type defaults to "D".
func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskModel) disks.CreateRequest {
	tflog.Info(ctx, "Start CreateRequestResourceDisk", map[string]any{
		"account_id": plan.AccountID.ValueInt64(),
		"disk_name":  plan.DiskName.ValueString(),
		"size_max":   plan.SizeMax.ValueInt64(),
		"gid":        plan.GID.ValueInt64(),
	})

	// Required parameters plus the default disk type.
	req := disks.CreateRequest{
		AccountID: uint64(plan.AccountID.ValueInt64()),
		Name:      plan.DiskName.ValueString(),
		Size:      uint64(plan.SizeMax.ValueInt64()),
		GID:       uint64(plan.GID.ValueInt64()),
		Type:      "D", // default value, replaced below when the plan sets one
	}
	if !plan.Type.IsUnknown() {
		req.Type = plan.Type.ValueString()
	}
	if !plan.SEPID.IsUnknown() {
		req.SEPID = uint64(plan.SEPID.ValueInt64())
	}
	if !plan.Pool.IsUnknown() {
		req.Pool = plan.Pool.ValueString()
	}
	if !plan.Description.IsUnknown() {
		req.Description = plan.Description.ValueString()
	}
	return req
}
// LimitIOCreateDisk applies the IO limits from the plan's iotune block to a
// freshly created disk. API failures are reported as warnings so that the
// already-created disk resource is not rolled back.
func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// plan.IOTune is not null as it was checked before call
	tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskId})
	var iotune models.IOTuneModel
	diags.Append(plan.IOTune.As(ctx, &iotune, basetypes.ObjectAsOptions{})...)
	if diags.HasError() {
		tflog.Error(ctx, "LimitIOCreateDisk: cannot populate iotune with plan.IOTune object element")
		return diags
	}

	limitIOReq := disks.LimitIORequest{
		DiskID:           diskId,
		IOPS:             uint64(iotune.TotalIOPSSec.ValueInt64()),
		ReadBytesSec:     uint64(iotune.ReadBytesSec.ValueInt64()),
		ReadBytesSecMax:  uint64(iotune.ReadBytesSecMax.ValueInt64()),
		ReadIOPSSec:      uint64(iotune.ReadIOPSSec.ValueInt64()),
		ReadIOPSSecMax:   uint64(iotune.ReadIOPSSecMax.ValueInt64()),
		SizeIOPSSec:      uint64(iotune.SizeIOPSSec.ValueInt64()),
		TotalBytesSec:    uint64(iotune.TotalBytesSec.ValueInt64()),
		TotalBytesSecMax: uint64(iotune.TotalBytesSecMax.ValueInt64()),
		TotalIOPSSec:     uint64(iotune.TotalIOPSSec.ValueInt64()),
		TotalIOPSSecMax:  uint64(iotune.TotalIOPSSecMax.ValueInt64()),
		WriteBytesSec:    uint64(iotune.WriteBytesSec.ValueInt64()),
		WriteBytesSecMax: uint64(iotune.WriteBytesSecMax.ValueInt64()),
		WriteIOPSSec:     uint64(iotune.WriteIOPSSec.ValueInt64()),
		WriteIOPSSecMax:  uint64(iotune.WriteIOPSSecMax.ValueInt64()),
	}

	tflog.Info(ctx, "LimitIOCreateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
		"disk_id":    diskId,
		"limitIOReq": limitIOReq})
	res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
	if err != nil {
		// Warning, not error: the disk already exists; only the limits failed.
		diags.AddWarning("LimitIOCreateDisk: Unable to limit io for Disk",
			err.Error())
	}
	tflog.Info(ctx, "LimitIOCreateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
		"disk_id":  diskId,
		"response": res})
	return diags
}
// ShareableCreateDisk marks a newly created disk as shared.
// API failures are reported as warnings rather than errors.
func ShareableCreateDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	tflog.Info(ctx, "ShareableCreateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
	res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
	if err != nil {
		// Warning, not error: the disk exists; only sharing failed.
		diags.AddWarning("ShareableCreateDisk: Unable to share Disk",
			err.Error())
	}
	tflog.Info(ctx, "ShareableCreateDisk: response from CloudAPI().Disks().Share", map[string]any{
		"disk_id":  diskId,
		"response": res})
	return diags
}
// DiskReadStatus loads the disk referenced by state.Id and reacts to its current
// status: Modeled aborts with an error, Deleted triggers an automatic restore
// (stamping state.LastUpdated on success), and Destroyed/Purged abort because the
// underlying resource no longer exists. Any other status is accepted as-is.
func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.Id.ValueString()})
	diags := diag.Diagnostics{}

	diskId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("DiskReadStatus: Cannot parse disk ID from state", err.Error())
		return diags
	}

	disk, err := DiskCheckPresence(ctx, diskId, c)
	if err != nil {
		diags.AddError("DiskReadStatus: Unable to Read Disk before status check", err.Error())
		return diags
	}

	// React to the current platform-side status of the resource.
	switch disk.Status {
	case status.Modeled:
		diags.AddError(
			"Disk is in status Modeled",
			"please, contact support for more information",
		)
		return diags

	case status.Deleted:
		// Attempt to restore a Deleted disk so the read can proceed.
		tflog.Info(ctx, "DiskReadStatus: disk with status.Deleted is being read, attempt to restore it", map[string]any{
			"disk_id": disk.ID,
			"status":  disk.Status})
		diags.Append(RestoreDisk(ctx, diskId, c)...)
		if diags.HasError() {
			tflog.Error(ctx, "DiskReadStatus: cannot restore disk")
			return diags
		}
		tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskId})
		state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	case status.Destroyed, status.Purged:
		diags.AddError(
			"DiskReadStatus: Disk is in status Destroyed or Purged",
			fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskId),
		)
		return diags
	}

	return nil
}
// RestoreDisk issues a Restore request for the given disk ID.
// Returns diagnostics carrying an error if the platform call fails.
func RestoreDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
	var diags diag.Diagnostics

	req := disks.RestoreRequest{
		DiskID: diskId,
		Reason: "Terraform automatic restore",
	}
	tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskId": diskId, "req": req})
	res, err := c.CloudAPI().Disks().Restore(ctx, req)
	if err != nil {
		diags.AddError(
			"RestoreDisk: cannot restore disk",
			err.Error(),
		)
		return diags
	}
	tflog.Info(ctx, "RestoreDisk: response from CloudAPI().Disks().Restore", map[string]any{"disk_id": diskId, "response": res})
	return nil
}
// SizeMaxUpdateDisk grows a disk to the size requested by the plan.
// A plan size smaller than the state size is rejected with an error before
// any API call is made.
func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// Reject shrink requests up front.
	if plan.SizeMax.ValueInt64() < state.SizeMax.ValueInt64() {
		diags.AddError(
			"SizeMaxUpdateDisk: reducing disk size is not allowed",
			fmt.Sprintf("disk with id %s has state size %d, plan size %d",
				plan.Id.ValueString(),
				state.SizeMax.ValueInt64(),
				plan.SizeMax.ValueInt64()))
		return diags
	}

	resizeReq := disks.ResizeRequest{
		DiskID: diskId,
		Size:   uint64(plan.SizeMax.ValueInt64()),
	}
	tflog.Info(ctx, "SizeMaxUpdateDisk: before calling CloudAPI().Disks().Resize2", map[string]any{
		"disk_id":        plan.Id.ValueString(),
		"size_max_state": state.SizeMax.ValueInt64(),
		"size_max_plan":  plan.SizeMax.ValueInt64(),
		"req":            resizeReq,
	})
	res, err := c.CloudAPI().Disks().Resize2(ctx, resizeReq)
	if err != nil {
		diags.AddError("can not resize disk", err.Error())
		return diags
	}
	tflog.Info(ctx, "SizeMaxUpdateDisk: response from CloudAPI().Disks().Resize2", map[string]any{
		"disk_id":  plan.Id.ValueString(),
		"response": res})
	return nil
}
// NameUpdateDisk renames a disk to the name held in the plan.
// Returns diagnostics carrying an error if the platform call fails.
func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	req := disks.RenameRequest{
		DiskID: diskId,
		Name:   plan.DiskName.ValueString(),
	}
	tflog.Info(ctx, "NameUpdateDisk: before calling CloudAPI().Disks().Rename", map[string]any{
		"disk_id":        plan.Id.ValueString(),
		"disk_name_plan": plan.DiskName.ValueString(),
		"req":            req,
	})
	res, err := c.CloudAPI().Disks().Rename(ctx, req)
	if err != nil {
		diags.AddError("NameUpdateDisk: can not rename disk", err.Error())
		return diags
	}
	tflog.Info(ctx, "NameUpdateDisk: response from CloudAPI().Disks().Rename", map[string]any{
		"disk_id":  plan.Id.ValueString(),
		"response": res})
	return nil
}
// LimitIOUpdateDisk applies the IO limits from the plan's iotune block to an
// existing disk during update. Unlike LimitIOCreateDisk, API failures here are
// reported as errors, failing the update.
func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// plan.IOTune is not null as it was checked before call
	tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskId})
	var iotune models.IOTuneModel
	diags.Append(plan.IOTune.As(ctx, &iotune, basetypes.ObjectAsOptions{})...)
	if diags.HasError() {
		tflog.Error(ctx, "LimitIOUpdateDisk: cannot populate iotune with plan.IOTune object element")
		return diags
	}

	limitIOReq := disks.LimitIORequest{
		DiskID:           diskId,
		IOPS:             uint64(iotune.TotalIOPSSec.ValueInt64()),
		ReadBytesSec:     uint64(iotune.ReadBytesSec.ValueInt64()),
		ReadBytesSecMax:  uint64(iotune.ReadBytesSecMax.ValueInt64()),
		ReadIOPSSec:      uint64(iotune.ReadIOPSSec.ValueInt64()),
		ReadIOPSSecMax:   uint64(iotune.ReadIOPSSecMax.ValueInt64()),
		SizeIOPSSec:      uint64(iotune.SizeIOPSSec.ValueInt64()),
		TotalBytesSec:    uint64(iotune.TotalBytesSec.ValueInt64()),
		TotalBytesSecMax: uint64(iotune.TotalBytesSecMax.ValueInt64()),
		TotalIOPSSec:     uint64(iotune.TotalIOPSSec.ValueInt64()),
		TotalIOPSSecMax:  uint64(iotune.TotalIOPSSecMax.ValueInt64()),
		WriteBytesSec:    uint64(iotune.WriteBytesSec.ValueInt64()),
		WriteBytesSecMax: uint64(iotune.WriteBytesSecMax.ValueInt64()),
		WriteIOPSSec:     uint64(iotune.WriteIOPSSec.ValueInt64()),
		WriteIOPSSecMax:  uint64(iotune.WriteIOPSSecMax.ValueInt64()),
	}

	tflog.Info(ctx, "LimitIOUpdateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
		"disk_id":    diskId,
		"limitIOReq": limitIOReq})
	res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
	if err != nil {
		diags.AddError("LimitIOUpdateDisk: Unable to limit io for Disk",
			err.Error())
		return diags
	}
	tflog.Info(ctx, "LimitIOUpdateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
		"disk_id":  diskId,
		"response": res})
	return nil
}
// ShareableUpdateDisk toggles the shared flag on a disk: share == true shares it,
// share == false unshares it. API failures are returned as errors.
func ShareableUpdateDisk(ctx context.Context, diskId uint64, share bool, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	if share {
		tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
		res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
		if err != nil {
			diags.AddError("ShareableUpdateDisk: Unable to share Disk",
				err.Error())
			return diags
		}
		tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Share", map[string]any{
			"disk_id":  diskId,
			"response": res})
	} else {
		tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Unshare", map[string]any{"disk_id": diskId})
		res, err := c.CloudAPI().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
		if err != nil {
			diags.AddError("ShareableUpdateDisk: Unable to unshare Disk",
				err.Error())
			return diags
		}
		tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Unshare", map[string]any{
			"disk_id":  diskId,
			"response": res})
	}
	return nil
}

View File

@@ -0,0 +1,172 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// UtilityDiskReplicationUpdateStartStop starts or stops replication between the
// disk (state.DiskId) and its replica (state.ReplicationId) according to
// state.Start. A null or unknown Start value is a no-op.
func UtilityDiskReplicationUpdateStartStop(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}
	diskId := uint64(state.DiskId.ValueInt64())
	targetDiskId := uint64(state.ReplicationId.ValueInt64())

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start update for disk replica with ID: %d", diskId))

	if !state.Start.IsNull() && !state.Start.IsUnknown() {
		if state.Start.ValueBool() {
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
			_, err := c.CloudAPI().Disks().ReplicationStart(ctx, disks.ReplicationStartRequest{
				DiskID:       diskId,
				TargetDiskID: targetDiskId,
			})
			if err != nil {
				diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to start replicate disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
		} else {
			// Stopping is addressed at the replica (target) disk.
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
			_, err := c.CloudAPI().Disks().ReplicationStop(ctx, disks.ReplicationStopRequest{
				DiskID: targetDiskId,
			})
			if err != nil {
				diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to stop replicate disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
		}
	}

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: complete update for disk replica with ID: %d", diskId))
	return nil
}
// UtilityDiskReplicationUpdatePause suspends or resumes replication for the disk
// in state.DiskId according to state.Pause. A null or unknown Pause value is a
// no-op.
func UtilityDiskReplicationUpdatePause(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}
	diskId := uint64(state.DiskId.ValueInt64())

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: start update for disk replica with ID: %d", diskId))

	if !state.Pause.IsNull() && !state.Pause.IsUnknown() {
		if state.Pause.ValueBool() {
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d", diskId))
			_, err := c.CloudAPI().Disks().ReplicationSuspend(ctx, disks.ReplicationSuspendRequest{
				DiskID: diskId,
			})
			if err != nil {
				diags.AddError("utilityDiskReplicationUpdatePause: Unable to pause disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d, complete", diskId))
		} else {
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d", diskId))
			_, err := c.CloudAPI().Disks().ReplicationResume(ctx, disks.ReplicationResumeRequest{
				DiskID: diskId,
			})
			if err != nil {
				diags.AddError("utilityDiskReplicationUpdatePause: Unable to resume disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d, complete", diskId))
		}
	}

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: complete update for disk replica with ID: %d", diskId))
	return nil
}
// UtilityDiskReplicationUpdateReverse reverses the replication direction according
// to state.Reverse: true reverses from the source disk, false reverses from the
// replica disk. A null or unknown Reverse value is a no-op.
func UtilityDiskReplicationUpdateReverse(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}
	diskId := uint64(state.DiskId.ValueInt64())
	targetDiskId := uint64(state.ReplicationId.ValueInt64())

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: start update for disk replica with ID: %d", diskId))

	if !state.Reverse.IsNull() && !state.Reverse.IsUnknown() {
		if state.Reverse.ValueBool() {
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
			_, err := c.CloudAPI().Disks().ReplicationReverse(ctx, disks.ReplicationReverseRequest{
				DiskID: diskId,
			})
			if err != nil {
				diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
		} else {
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
			_, err := c.CloudAPI().Disks().ReplicationReverse(ctx, disks.ReplicationReverseRequest{
				DiskID: targetDiskId,
			})
			if err != nil {
				diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk", err.Error())
				return diags
			}
			tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
		}
	}

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: complete update for disk replica with ID: %d", diskId))
	return nil
}
// ResourceDiskReplicationCheckPresence fetches the replication status for the
// source disk and then loads the disk record, falling back to the replica ID
// when the source disk ID is null or unknown in state.
func ResourceDiskReplicationCheckPresence(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
	statusReq := disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())}
	status, err := c.CloudAPI().Disks().ReplicationStatus(ctx, statusReq)
	if err != nil {
		return nil, nil, err
	}

	getReq := disks.GetRequest{DiskID: uint64(state.ReplicationId.ValueInt64())}
	if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
		getReq.DiskID = uint64(state.DiskId.ValueInt64())
	}

	tflog.Info(ctx, "ResourceDiskReplicationCheckPresence: load disk")
	disk, err := c.CloudAPI().Disks().Get(ctx, getReq)
	if err != nil {
		return nil, nil, err
	}
	return disk, &status, nil
}
// ReplicationDiskReadStatus verifies that the replicated disk referenced by state
// can still be read (replication status plus disk record) before further processing.
// In case of failure returns errors.
func ReplicationDiskReadStatus(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ReplicationDiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.DiskId.ValueInt64()})
	diags := diag.Diagnostics{}
	// Only existence is checked here; the returned record and status are discarded.
	_, _, err := ResourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("ReplicationDiskReadStatus: Unable to Read Disk before status check", err.Error())
		return diags
	}
	return nil
}

View File

@@ -0,0 +1,94 @@
package utilities
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
// DiskSnapshotCheckPresence resolves the (disk ID, label) pair for a snapshot —
// from the plan directly or, for an imported resource, from the composite
// "diskId#label" resource ID — and returns the matching snapshot of the disk.
// Returns diagnostics when the ID cannot be parsed, the disk cannot be loaded,
// or no snapshot with the label exists.
func DiskSnapshotCheckPresence(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
	diags := diag.Diagnostics{}

	// take diskId and label from plan
	diskId := uint64(plan.DiskID.ValueInt64())
	label := plan.Label.ValueString()

	// take diskId and label from Id for imported resource ("diskId#label")
	if strings.Contains(plan.Id.ValueString(), "#") {
		parts := strings.Split(plan.Id.ValueString(), "#")
		// ParseUint rejects negative or non-numeric IDs instead of silently
		// wrapping them into a huge uint64 (as Atoi + uint64 conversion would).
		parsedId, err := strconv.ParseUint(parts[0], 10, 64)
		if err != nil {
			diags.AddError("Cannot parse disk ID from state", err.Error())
			return nil, diags
		}
		diskId = parsedId
		label = parts[1]
	}

	tflog.Info(ctx, "Start DiskSnapshotCheckPresence", map[string]any{
		"disk_id": diskId,
		"label":   label,
		"id":      plan.Id.ValueString(),
	})
	tflog.Info(ctx, "DiskSnapshotCheckPresence: before call CloudAPI().Disks().Get", map[string]any{"disk_id": diskId})
	disk, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		diags.AddError(
			fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
			err.Error(),
		)
		return nil, diags
	}
	tflog.Info(ctx, "DiskSnapshotCheckPresence: response from CloudAPI().Disks().Get", map[string]any{"response": disk})

	// Return the first snapshot whose label matches.
	for _, sn := range disk.Snapshots {
		if label == sn.Label {
			return &sn, nil
		}
	}

	diags.AddError(
		"Snapshot not found",
		fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
	)
	return nil, diags
}
// RollbackDiskSnapshot rolls a disk back to the snapshot named in the plan,
// optionally pinned to a specific timestamp when one is known in the plan.
// Returns diagnostics carrying an error if the platform call fails.
func RollbackDiskSnapshot(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	var diags diag.Diagnostics

	req := disks.SnapshotRollbackRequest{
		DiskID: uint64(plan.DiskID.ValueInt64()),
		Label:  plan.Label.ValueString(),
	}
	if !plan.TimeStamp.IsUnknown() {
		req.TimeStamp = uint64(plan.TimeStamp.ValueInt64())
	}

	tflog.Info(ctx, "RollbackDiskSnapshot: before calling CloudAPI().Disks().SnapshotRollback", map[string]any{"req": req})
	res, err := c.CloudAPI().Disks().SnapshotRollback(ctx, req)
	if err != nil {
		diags.AddError(
			"RollbackDiskSnapshot: Cannot rollback snapshot",
			err.Error(),
		)
		return diags
	}
	tflog.Info(ctx, "RollbackDiskSnapshot: response from CloudAPI().Disks().SnapshotRollback", map[string]any{
		"disk_id":  plan.DiskID.ValueInt64(),
		"label":    plan.Label.ValueString(),
		"response": res})
	return nil
}