1.1.0
internal/service/cloudbroker/disks/data_source_cb_disk.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDisk{}
)

func NewDataSourceDisk() datasource.DataSource {
	return &dataSourceDisk{}
}

// dataSourceDisk is the data source implementation.
type dataSourceDisk struct {
	client *decort.DecortClient
}

func (d *dataSourceDisk) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DiskModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error get state")
		return
	}
	diskId := uint64(state.DiskID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDisk: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDisk: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error flatten data source disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDisk: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDisk", map[string]any{"disk_id": diskId})
}

func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDisk(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDisk) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDisk) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDisk")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDisk successfully")
}
@@ -0,0 +1,88 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskList{}
)

func NewdataSourceDiskList() datasource.DataSource {
	return &dataSourceDiskList{}
}

// dataSourceDiskList is the data source implementation.
type dataSourceDiskList struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DisksModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskList: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskList")
}

func (d *dataSourceDiskList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskList successfully")
}
@@ -0,0 +1,88 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListDeleted{}
)

func NewDataSourceDiskListDeleted() datasource.DataSource {
	return &dataSourceDiskListDeleted{}
}

// dataSourceDiskListDeleted is the data source implementation.
type dataSourceDiskListDeleted struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.ListDisksModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListDeleted: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListDeletedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListDeleted")
}

func (d *dataSourceDiskListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListDeleted(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_list_deleted"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListDeleted")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListDeleted successfully")
}
@@ -0,0 +1,88 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListTypes{}
)

func NewDataSourceDiskListTypes() datasource.DataSource {
	return &dataSourceDiskListTypes{}
}

// dataSourceDiskListTypes is the data source implementation.
type dataSourceDiskListTypes struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskListTypes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListTypesModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypes: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypes: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListTypesDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListTypes")
}

func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListTypes(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskListTypes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListTypes")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListTypes successfully")
}
@@ -0,0 +1,88 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListTypesDetailed{}
)

func NewDataSourceDiskListTypesDetailed() datasource.DataSource {
	return &dataSourceDiskListTypesDetailed{}
}

// dataSourceDiskListTypesDetailed is the data source implementation.
type dataSourceDiskListTypesDetailed struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskListTypesDetailed) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListTypesDetailedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListTypesDetailedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListTypesDetailed")
}

func (d *dataSourceDiskListTypesDetailed) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListTypesDetailed(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskListTypesDetailed) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types_detailed"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypesDetailed) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListTypesDetailed")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListTypesDetailed successfully")
}
@@ -0,0 +1,88 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskListUnattached{}
)

func NewDataSourceDiskListUnattached() datasource.DataSource {
	return &dataSourceDiskListUnattached{}
}

// dataSourceDiskListUnattached is the data source implementation.
type dataSourceDiskListUnattached struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskListUnattached) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskListUnattachedModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error get state")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListUnattached: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskListUnattached: set timeouts successfully", map[string]any{"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskListUnattachedDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskListUnattached")
}

func (d *dataSourceDiskListUnattached) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskListUnattached(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskListUnattached) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_list_unattached"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListUnattached) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskListUnattached")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskListUnattached successfully")
}
@@ -0,0 +1,91 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskReplication{}
)

func NewDataSourceDiskReplication() datasource.DataSource {
	return &dataSourceDiskReplication{}
}

// dataSourceDiskReplication is the data source implementation.
type dataSourceDiskReplication struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskReplication) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.RecordDiskModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error get state")
		return
	}
	diskId := uint64(state.DiskId.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDiskReplication: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskReplicationDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error flatten data source disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskReplication", map[string]any{"disk_id": diskId})
}

func (d *dataSourceDiskReplication) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskReplication(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskReplication) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskReplication) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskReplication")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskReplication successfully")
}
@@ -0,0 +1,96 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskSnapshot{}
)

func NewDataSourceDiskSnapshot() datasource.DataSource {
	return &dataSourceDiskSnapshot{}
}

// dataSourceDiskSnapshot is the data source implementation.
type dataSourceDiskSnapshot struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskSnapshot) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskSnapshotModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error get state")
		return
	}

	ctxSnpsht := map[string]any{
		"disk_id": uint64(state.DiskID.ValueInt64()),
		"label":   state.Label.ValueString(),
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshot: got state successfully", ctxSnpsht)

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshot: set timeouts successfully", map[string]any{
		"disk_id":     uint64(state.DiskID.ValueInt64()),
		"label":       state.Label.ValueString(),
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskSnapshotDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error flatten data source")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskSnapshot", ctxSnpsht)
}

func (d *dataSourceDiskSnapshot) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskSnapshot(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskSnapshot) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshot) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskSnapshot")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshot successfully")
}
@@ -0,0 +1,91 @@
package cbDisks

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &dataSourceDiskSnapshotList{}
)

func NewDataSourceDiskSnapshotList() datasource.DataSource {
	return &dataSourceDiskSnapshotList{}
}

// dataSourceDiskSnapshotList is the data source implementation.
type dataSourceDiskSnapshotList struct {
	client *decort.DecortClient
}

func (d *dataSourceDiskSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDiskSnapshotListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error get state")
		return
	}
	diskId := uint64(state.DiskID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDiskSnapshotList: got state successfully", map[string]any{"disk_id": diskId})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDiskSnapshotList: set timeouts successfully", map[string]any{
		"disk_id":     diskId,
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DiskSnapshotListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error flatten data source")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDiskSnapshotList", map[string]any{"disk_id": diskId})
}

func (d *dataSourceDiskSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDiskSnapshotList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

func (d *dataSourceDiskSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot_list"
}

// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDiskSnapshotList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshotList successfully")
}
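
For context, the nine constructors above only take effect once the provider returns them from its DataSources method (part of the terraform-plugin-framework provider.Provider interface). A minimal sketch, not part of this commit; "DynamixProvider" is a hypothetical name for the provider type:

// Sketch: registering the cloudbroker disk data sources with the provider.
// The real provider type and its other data sources are outside this diff.
func (p *DynamixProvider) DataSources(_ context.Context) []func() datasource.DataSource {
	return []func() datasource.DataSource{
		cbDisks.NewDataSourceDisk,
		cbDisks.NewdataSourceDiskList,
		cbDisks.NewDataSourceDiskListDeleted,
		cbDisks.NewDataSourceDiskListTypes,
		cbDisks.NewDataSourceDiskListTypesDetailed,
		cbDisks.NewDataSourceDiskListUnattached,
		cbDisks.NewDataSourceDiskReplication,
		cbDisks.NewDataSourceDiskSnapshot,
		cbDisks.NewDataSourceDiskSnapshotList,
	}
}
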
@@ -0,0 +1,119 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskDataSource flattens data source for disk.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskDataSource(ctx context.Context, state *models.DiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskDataSource")

	diags := diag.Diagnostics{}

	recordDisk, diags := utilities.DataSourceDiskCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskDataSource: before flatten")

	diskAcl, _ := json.Marshal(recordDisk.ACL)

	*state = models.DiskModel{
		DiskID:              state.DiskID,
		Timeouts:            state.Timeouts,
		AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:         types.StringValue(recordDisk.AccountName),
		SEPType:             types.StringValue(recordDisk.SEPType),
		ACL:                 types.StringValue(string(diskAcl)),
		BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
		Computes:            flattenComputeList(ctx, recordDisk.Computes),
		CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
		Description:         types.StringValue(recordDisk.Description),
		DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
		DiskPath:            types.StringValue(recordDisk.DiskPath),
		DeviceName:          types.StringValue(recordDisk.DeviceName),
		GID:                 types.Int64Value(int64(recordDisk.GID)),
		GUID:                types.Int64Value(int64(recordDisk.GUID)),
		ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
		IQN:                 types.StringValue(recordDisk.IQN),
		Login:               types.StringValue(recordDisk.Login),
		Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
		Name:                types.StringValue(recordDisk.Name),
		Order:               types.Int64Value(int64(recordDisk.Order)),
		Params:              types.StringValue(recordDisk.Params),
		ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
		Password:            types.StringValue(recordDisk.Password),
		PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:                types.StringValue(recordDisk.Pool),
		PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
		PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
		RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
		ReferenceID:         types.StringValue(recordDisk.ReferenceID),
		ResID:               types.StringValue(recordDisk.ResID),
		ResName:             types.StringValue(recordDisk.ResName),
		Role:                types.StringValue(recordDisk.Role),
		SEPID:               types.Int64Value(int64(recordDisk.SEPID)),
		Shareable:           types.BoolValue(recordDisk.Shareable),
		SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
		Snapshots:           flattenSnapshotList(ctx, recordDisk.Snapshots),
		Status:              types.StringValue(recordDisk.Status),
		TechStatus:          types.StringValue(recordDisk.TechStatus),
		Type:                types.StringValue(recordDisk.Type),
		VMID:                types.Int64Value(int64(recordDisk.VMID)),
	}

	state.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.Images to state.Images", diags))
	}
	state.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.PresentTo to state.PresentTo", diags))
	}

	iotune := models.IOModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}

	state.IOTune = &iotune

	repl := models.ItemReplModel{
		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
		Role:         types.StringValue(recordDisk.Replication.Role),
		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
	}

	state.Replication = &repl

	tflog.Info(ctx, "flattens.DiskDataSource: end flatten")
	return nil
}
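
A note on the ACL handling above: the SDK's ACL structure is serialized to JSON and stored as a single string attribute. A minimal standalone illustration of that pattern, with a hypothetical ACL payload:

// Sketch only: serialize an ACL-like value to JSON and wrap it in a
// framework string value, as DiskDataSource does with recordDisk.ACL.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	acl := map[string]any{"ownerUserID": 7, "rights": "RCX"} // hypothetical payload
	raw, _ := json.Marshal(acl)
	attr := types.StringValue(string(raw))
	fmt.Println(attr.ValueString()) // {"ownerUserID":7,"rights":"RCX"}
}
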
@@ -0,0 +1,180 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskListDataSource flattens data source for disk list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskListDataSource(ctx context.Context, state *models.DisksModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListDataSource")

	diags := diag.Diagnostics{}

	diskList, diags := utilities.DataSourceDiskListCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskListDataSource: before flatten")

	*state = models.DisksModel{
		ByID:        state.ByID,
		Name:        state.Name,
		AccountName: state.AccountName,
		DiskMaxSize: state.DiskMaxSize,
		Shared:      state.Shared,
		AccountID:   state.AccountID,
		Type:        state.Type,
		SortBy:      state.SortBy,
		Page:        state.Page,
		Size:        state.Size,
		Timeouts:    state.Timeouts,

		// computed fields
		EntryCount: types.Int64Value(int64(diskList.EntryCount)),
	}

	items := make([]models.DiskItemModel, 0, diskList.EntryCount)
	for _, recordDisk := range diskList.Data {
		diskAcl, _ := json.Marshal(recordDisk.ACL)
		d := models.DiskItemModel{
			MachineID:           types.Int64Value(int64(recordDisk.MachineID)),
			MachineName:         types.StringValue(recordDisk.MachineName),
			AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:         types.StringValue(recordDisk.AccountName),
			SEPType:             types.StringValue(recordDisk.SEPType),
			ACL:                 types.StringValue(string(diskAcl)),
			BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
			Computes:            flattenComputeList(ctx, recordDisk.Computes),
			CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:         types.StringValue(recordDisk.Description),
			DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
			DiskPath:            types.StringValue(recordDisk.DiskPath),
			DeviceName:          types.StringValue(recordDisk.DeviceName),
			GID:                 types.Int64Value(int64(recordDisk.GID)),
			GUID:                types.Int64Value(int64(recordDisk.GUID)),
			ID:                  types.Int64Value(int64(recordDisk.ID)),
			ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
			IQN:                 types.StringValue(recordDisk.IQN),
			Login:               types.StringValue(recordDisk.Login),
			Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
			Name:                types.StringValue(recordDisk.Name),
			Order:               types.Int64Value(int64(recordDisk.Order)),
			Params:              types.StringValue(recordDisk.Params),
			ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
			Password:            types.StringValue(recordDisk.Password),
			PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:                types.StringValue(recordDisk.Pool),
			PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
			PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
			RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
			ReferenceID:         types.StringValue(recordDisk.ReferenceID),
			ResID:               types.StringValue(recordDisk.ResID),
			ResName:             types.StringValue(recordDisk.ResName),
			Role:                types.StringValue(recordDisk.Role),
			SEPID:               types.Int64Value(int64(recordDisk.SEPID)),
			Shareable:           types.BoolValue(recordDisk.Shareable),
			SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
			Snapshots:           flattenSnapshotList(ctx, recordDisk.Snapshots),
			Status:              types.StringValue(recordDisk.Status),
			TechStatus:          types.StringValue(recordDisk.TechStatus),
			Type:                types.StringValue(recordDisk.Type),
			VMID:                types.Int64Value(int64(recordDisk.VMID)),
		}

		d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.Images to d.Images", diags))
		}
		d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
		}

		iotune := models.IOModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}

		d.IOTune = &iotune

		repl := models.ItemReplModel{
			DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
			PoolID:       types.StringValue(recordDisk.Replication.PoolID),
			Role:         types.StringValue(recordDisk.Replication.Role),
			SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
			StorageID:    types.StringValue(recordDisk.Replication.StorageID),
			VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
		}

		d.Replication = &repl

		items = append(items, d)
	}
	state.Items = items

	tflog.Info(ctx, "flattens.DiskListDataSource: end flatten")
	return nil
}

func flattenComputeList(ctx context.Context, computes map[string]string) []models.ComputeModel {
	tflog.Info(ctx, "Start flattenComputeList")

	res := make([]models.ComputeModel, 0, len(computes))

	for k, v := range computes {
		temp := models.ComputeModel{
			ComputeID:   types.StringValue(k),
			ComputeName: types.StringValue(v),
		}

		res = append(res, temp)
	}

	tflog.Info(ctx, "End flattenComputeList")
	return res
}

func flattenSnapshotList(ctx context.Context, snapshots disks.ListSnapshots) []models.SnapshotModel {
	tflog.Info(ctx, "Start flattenSnapshotList")
	res := make([]models.SnapshotModel, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.SnapshotModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			Timestamp:   types.Int64Value(int64(item.Timestamp)),
		}
		res = append(res, temp)
	}

	tflog.Info(ctx, "End flattenSnapshotList")
	return res
}
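
Illustrative usage of the helpers above, written as if from within this package since they are unexported; the compute IDs and names are hypothetical. Note that Go randomizes map iteration order, so the order of the resulting slice is not stable between runs:

// Sketch: feed a compute map (keyed by compute ID) through flattenComputeList.
func exampleFlattenComputeList(ctx context.Context) {
	computes := map[string]string{"1051": "web-front", "1052": "db-main"}
	for _, c := range flattenComputeList(ctx, computes) {
		fmt.Println(c.ComputeID.ValueString(), "=>", c.ComputeName.ValueString())
	}
}
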
@@ -0,0 +1,180 @@
|
||||
package flattens
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
|
||||
)
|
||||
|
||||
// DiskListDeletedDataSource flattens data source for disk list deleted.
|
||||
// Return error in case data source is not found on the platform.
|
||||
// Flatten errors are added to tflog.
|
||||
func DiskListDeletedDataSource(ctx context.Context, state *models.ListDisksModel, c *decort.DecortClient) diag.Diagnostics {
|
||||
tflog.Info(ctx, "Start flattens.DiskListDeletedDataSource")
|
||||
|
||||
diags := diag.Diagnostics{}
|
||||
|
||||
diskList, diags := utilities.DataSourceDiskListDeletedCheckPresence(ctx, state, c)
|
||||
if diags.HasError() {
|
||||
return diags
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "flattens.DiskListDeletedDataSource: before flatten")
|
||||
|
||||
*state = models.ListDisksModel{
|
||||
ByID: state.ByID,
|
||||
Name: state.Name,
|
||||
AccountName: state.AccountName,
|
||||
DiskMaxSize: state.DiskMaxSize,
|
||||
Shared: state.Shared,
|
||||
AccountID: state.AccountID,
|
||||
Type: state.Type,
|
||||
SortBy: state.SortBy,
|
||||
Page: state.Page,
|
||||
Size: state.Size,
|
||||
Timeouts: state.Timeouts,
|
||||
|
||||
// computed fields
|
||||
EntryCount: types.Int64Value(int64(diskList.EntryCount)),
|
||||
}
|
||||
|
||||
items := make([]models.ItemDiskModel, 0, diskList.EntryCount)
|
||||
for _, recordDisk := range diskList.Data {
|
||||
diskAcl, _ := json.Marshal(recordDisk.ACL)
|
||||
d := models.ItemDiskModel{
|
||||
			MachineID:           types.Int64Value(int64(recordDisk.MachineID)),
			MachineName:         types.StringValue(recordDisk.MachineName),
			AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:         types.StringValue(recordDisk.AccountName),
			SEPType:             types.StringValue(recordDisk.SEPType),
			ACL:                 types.StringValue(string(diskAcl)),
			BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
			Computes:            flattenComputes(ctx, recordDisk.Computes),
			CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:         types.StringValue(recordDisk.Description),
			DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
			DiskPath:            types.StringValue(recordDisk.DiskPath),
			DeviceName:          types.StringValue(recordDisk.DeviceName),
			GID:                 types.Int64Value(int64(recordDisk.GID)),
			GUID:                types.Int64Value(int64(recordDisk.GUID)),
			ID:                  types.Int64Value(int64(recordDisk.ID)),
			ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
			IQN:                 types.StringValue(recordDisk.IQN),
			Login:               types.StringValue(recordDisk.Login),
			Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
			Name:                types.StringValue(recordDisk.Name),
			Order:               types.Int64Value(int64(recordDisk.Order)),
			Params:              types.StringValue(recordDisk.Params),
			ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
			Password:            types.StringValue(recordDisk.Password),
			PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:                types.StringValue(recordDisk.Pool),
			PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
			PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
			RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
			ReferenceID:         types.StringValue(recordDisk.ReferenceID),
			ResID:               types.StringValue(recordDisk.ResID),
			ResName:             types.StringValue(recordDisk.ResName),
			Role:                types.StringValue(recordDisk.Role),
			SEPID:               types.Int64Value(int64(recordDisk.SEPID)),
			Shareable:           types.BoolValue(recordDisk.Shareable),
			SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
			Snapshots:           flattenSnapshots(ctx, recordDisk.Snapshots),
			Status:              types.StringValue(recordDisk.Status),
			TechStatus:          types.StringValue(recordDisk.TechStatus),
			Type:                types.StringValue(recordDisk.Type),
			VMID:                types.Int64Value(int64(recordDisk.VMID)),
		}

		d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.Images to d.Images", diags))
		}
		d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
		}

		iotune := models.IOTuneModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}

		d.IOTune = &iotune

		repl := models.ItemReplicationModel{
			DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
			PoolID:       types.StringValue(recordDisk.Replication.PoolID),
			Role:         types.StringValue(recordDisk.Replication.Role),
			SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
			StorageID:    types.StringValue(recordDisk.Replication.StorageID),
			VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
		}

		d.Replication = &repl

		items = append(items, d)
	}
	state.Items = items

	tflog.Info(ctx, "flattens.DiskListDeletedDataSource: end flatten")
	return nil
}

func flattenComputes(ctx context.Context, computes map[string]string) []models.ComputesModel {
	tflog.Info(ctx, "Start flattenComputes")

	res := make([]models.ComputesModel, 0, len(computes))

	for k, v := range computes {
		temp := models.ComputesModel{
			ComputeID:   types.StringValue(k),
			ComputeName: types.StringValue(v),
		}

		res = append(res, temp)
	}

	tflog.Info(ctx, "End flattenComputes")
	return res
}

func flattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) []models.ItemSnapshotModel {
	tflog.Info(ctx, "Start flattenSnapshots")
	res := make([]models.ItemSnapshotModel, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.ItemSnapshotModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			Timestamp:   types.Int64Value(int64(item.Timestamp)),
		}
		res = append(res, temp)
	}

	tflog.Info(ctx, "End flattenSnapshots")
	return res
}
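Note: flattenComputes iterates a Go map, and Go map iteration order is deliberately randomized, so the resulting list order can differ between reads. If deterministic ordering ever matters (for example, to avoid spurious plan diffs), a sorted-key variant could be used; the sketch below is illustrative only and not part of this commit (flattenComputesSorted is a hypothetical name, and it requires "sort" in the import block).

// Hedged sketch, not part of this commit: a deterministic variant of
// flattenComputes that sorts map keys before building the list.
func flattenComputesSorted(ctx context.Context, computes map[string]string) []models.ComputesModel {
	keys := make([]string, 0, len(computes))
	for k := range computes {
		keys = append(keys, k)
	}
	sort.Strings(keys) // pin a stable order; Go maps iterate randomly

	res := make([]models.ComputesModel, 0, len(computes))
	for _, k := range keys {
		res = append(res, models.ComputesModel{
			ComputeID:   types.StringValue(k),
			ComputeName: types.StringValue(computes[k]),
		})
	}
	return res
}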
@@ -0,0 +1,50 @@
package flattens

import (
	"context"
	"fmt"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskListTypesDataSource flattens the data source for the disk type list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskListTypesDataSource(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListTypesDataSource")

	diags := diag.Diagnostics{}

	listTypes, diags := utilities.DataSourceDiskListTypesCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskListTypesDataSource: before flatten")

	id := uuid.New()
	*state = models.DataSourceDiskListTypesModel{
		SortBy:   state.SortBy,
		Page:     state.Page,
		Size:     state.Size,
		Timeouts: state.Timeouts,

		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
	}

	state.Items, diags = types.ListValueFrom(ctx, types.StringType, listTypes.Data)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDataSource: cannot flatten listTypes.Data to state.Items", diags))
	}

	tflog.Info(ctx, "flattens.DiskListTypesDataSource: end flatten")
	return nil
}
@@ -0,0 +1,82 @@
package flattens

import (
	"context"
	"fmt"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskListTypesDetailedDataSource flattens the data source for the detailed disk type list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskListTypesDetailedDataSource(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListTypesDetailedDataSource")

	diags := diag.Diagnostics{}

	listTypes, diags := utilities.DataSourceDiskListTypesDetailedCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: before flatten")

	id := uuid.New()
	*state = models.DataSourceDiskListTypesDetailedModel{
		SortBy:   state.SortBy,
		Page:     state.Page,
		Size:     state.Size,
		Timeouts: state.Timeouts,

		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
	}

	items := make([]models.ItemDiskTypeDetailedModel, 0, len(listTypes.Data))
	for _, typeListDetailed := range listTypes.Data {
		// the platform returns untyped JSON here, hence the type assertions
		typeMap := typeListDetailed.(map[string]interface{})

		t := models.ItemDiskTypeDetailedModel{
			SepID:   types.Int64Value(int64(typeMap["sepId"].(float64))),
			SepName: types.StringValue(typeMap["sepName"].(string)),
		}

		var pools []models.ItemPoolModel
		poolsTemp := typeMap["pools"].([]interface{})
		for _, pool := range poolsTemp {
			poolsMap := pool.(map[string]interface{})
			p := models.ItemPoolModel{
				Name:   types.StringValue(poolsMap["name"].(string)),
				System: types.StringValue(poolsMap["system"].(string)),
			}

			p.Types, diags = types.ListValueFrom(ctx, types.StringType, flattenTypes(poolsMap["types"].([]interface{})))
			if diags != nil {
				tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDetailedDataSource: cannot flatten poolsMap[\"types\"] to p.Types", diags))
			}
			pools = append(pools, p)
		}
		t.Pools = pools
		items = append(items, t)
	}
	state.Items = items

	tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: end flatten")
	return diags
}

func flattenTypes(typesInterface []interface{}) []string {
	var typesList []string
	for _, typ := range typesInterface {
		typesList = append(typesList, typ.(string))
	}
	return typesList
}
@@ -0,0 +1,166 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskListUnattachedDataSource flattens the data source for the unattached disk list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskListUnattachedDataSource(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskListUnattachedDataSource")

	diags := diag.Diagnostics{}

	diskList, diags := utilities.DataSourceDiskListUnattachedCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: before flatten")

	id := uuid.New()
	*state = models.DataSourceDiskListUnattachedModel{
		ByID:        state.ByID,
		AccountName: state.AccountName,
		DiskMaxSize: state.DiskMaxSize,
		Status:      state.Status,
		AccountID:   state.AccountID,
		SepID:       state.SepID,
		PoolName:    state.PoolName,
		Type:        state.Type,
		SortBy:      state.SortBy,
		Page:        state.Page,
		Size:        state.Size,
		Timeouts:    state.Timeouts,

		// computed fields
		Id:         types.StringValue(id.String()),
		EntryCount: types.Int64Value(int64(diskList.EntryCount)),
	}

	items := make([]models.ItemDiskUnattachedModel, 0, diskList.EntryCount)
	for _, recordDisk := range diskList.Data {
		diskAcl, _ := json.Marshal(recordDisk.ACL)
		d := models.ItemDiskUnattachedModel{
			CKey:                types.StringValue(recordDisk.CKey),
			Meta:                flattens.Meta(ctx, recordDisk.Meta),
			AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
			AccountName:         types.StringValue(recordDisk.AccountName),
			ACL:                 types.StringValue(string(diskAcl)),
			BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
			CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
			DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
			Description:         types.StringValue(recordDisk.Description),
			DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
			DiskPath:            types.StringValue(recordDisk.DiskPath),
			GID:                 types.Int64Value(int64(recordDisk.GID)),
			GUID:                types.Int64Value(int64(recordDisk.GUID)),
			DiskId:              types.Int64Value(int64(recordDisk.ID)),
			ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
			Iqn:                 types.StringValue(recordDisk.IQN),
			Login:               types.StringValue(recordDisk.Login),
			Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
			DiskName:            types.StringValue(recordDisk.Name),
			Order:               types.Int64Value(int64(recordDisk.Order)),
			Params:              types.StringValue(recordDisk.Params),
			ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
			Passwd:              types.StringValue(recordDisk.Password),
			PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
			Pool:                types.StringValue(recordDisk.Pool),
			PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
			PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
			RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
			ReferenceID:         types.StringValue(recordDisk.ReferenceID),
			ResID:               types.StringValue(recordDisk.ResID),
			ResName:             types.StringValue(recordDisk.ResName),
			Role:                types.StringValue(recordDisk.Role),
			SepID:               types.Int64Value(int64(recordDisk.SEPID)),
			Shareable:           types.BoolValue(recordDisk.Shareable),
			SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
			SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
			Snapshots:           flattenUnattachedDiskSnapshots(ctx, recordDisk.Snapshots),
			Status:              types.StringValue(recordDisk.Status),
			TechStatus:          types.StringValue(recordDisk.TechStatus),
			Type:                types.StringValue(recordDisk.Type),
			VMID:                types.Int64Value(int64(recordDisk.VMID)),
		}

		d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.Images to d.Images", diags))
		}

		d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
		}

		iotune := models.IOTuneModel{
			ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
			ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
			ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
			ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
			SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
			TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
			TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
			TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
			TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
			WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
			WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
			WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
			WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListUnattachedDataSource iotune struct to obj", diags))
		}
		d.IOTune = obj

		items = append(items, d)
	}
	state.Items = items

	tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: end flatten")
	return diags
}

func flattenUnattachedDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
	tflog.Info(ctx, "Start flattenUnattachedDiskSnapshots")
	tempSlice := make([]types.Object, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.DiskUnattachedItemSnapshotModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			Timestamp:   types.Int64Value(int64(item.Timestamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenUnattachedDiskSnapshots struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenUnattachedDiskSnapshots", diags))
	}

	tflog.Info(ctx, "End flattenUnattachedDiskSnapshots")
	return res
}
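types.ObjectValueFrom converts a tfsdk-tagged model struct into an object value, and the attribute-type map passed alongside it (models.ItemSnapshot above) must describe exactly the struct's attributes, or the call returns diagnostics at runtime. A minimal hedged sketch of that pattern follows; ExampleModel and ExampleAttrTypes are illustrative placeholders, not names from this commit.

// Hedged sketch of the ObjectValueFrom pattern; requires
// "github.com/hashicorp/terraform-plugin-framework/attr" in addition to the
// imports above. Not part of this commit.
type ExampleModel struct {
	Name types.String `tfsdk:"name"`
}

var ExampleAttrTypes = map[string]attr.Type{
	"name": types.StringType, // keys must mirror the tfsdk tags exactly
}

func exampleObject(ctx context.Context) (types.Object, diag.Diagnostics) {
	return types.ObjectValueFrom(ctx, ExampleAttrTypes, ExampleModel{Name: types.StringValue("disk")})
}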
@@ -0,0 +1,167 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskReplicationDataSource flattens the data source for a replicated disk.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskReplicationDataSource(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskReplicationDataSource")

	diags := diag.Diagnostics{}

	recordDisk, status, err := utilities.DataSourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about disk", err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.DiskReplicationDataSource: before flatten")

	diskAcl, _ := json.Marshal(recordDisk.ACL)

	*state = models.RecordDiskModel{
		DiskId:   state.DiskId,
		Timeouts: state.Timeouts,

		ACL:                 types.StringValue(string(diskAcl)),
		BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
		AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:         types.StringValue(recordDisk.AccountName),
		Computes:            flattenDRComputes(ctx, recordDisk.Computes),
		CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
		DeviceName:          types.StringValue(recordDisk.DeviceName),
		DiskPath:            types.StringValue(recordDisk.DiskPath),
		Description:         types.StringValue(recordDisk.Description),
		DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
		GID:                 types.Int64Value(int64(recordDisk.GID)),
		GUID:                types.Int64Value(int64(recordDisk.GUID)),
		ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
		Images:              flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
		Name:                types.StringValue(recordDisk.Name),
		IQN:                 types.StringValue(recordDisk.IQN),
		Login:               types.StringValue(recordDisk.Login),
		Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
		Order:               types.Int64Value(int64(recordDisk.Order)),
		Params:              types.StringValue(recordDisk.Params),
		ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
		Passwd:              types.StringValue(recordDisk.Password),
		PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:                types.StringValue(recordDisk.Pool),
		PresentTo:           flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
		PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
		PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
		RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
		ReferenceID:         types.StringValue(recordDisk.ReferenceID),
		ResID:               types.StringValue(recordDisk.ResID),
		ResName:             types.StringValue(recordDisk.ResName),
		Role:                types.StringValue(recordDisk.Role),
		SepType:             types.StringValue(recordDisk.SEPType),
		SepID:               types.Int64Value(int64(recordDisk.SEPID)),
		Shareable:           types.BoolValue(recordDisk.Shareable),
		SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
		Snapshots:           diskListFlattenSnapshots(ctx, recordDisk.Snapshots),
		Status:              types.StringValue(recordDisk.Status),
		StatusReplication:   types.StringValue(*status),
		TechStatus:          types.StringValue(recordDisk.TechStatus),
		Type:                types.StringValue(recordDisk.Type),
		VMID:                types.Int64Value(int64(recordDisk.VMID)),
	}

	iotune := models.DiskReplicationIOTune{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	state.IOTune = &iotune

	itemReplication := models.ItemReplicationModel{
		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
		Role:         types.StringValue(recordDisk.Replication.Role),
		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
	}

	state.Replication = &itemReplication

	tflog.Info(ctx, "flattens.DiskReplicationDataSource: end flatten")
	return nil
}

func flattenDRComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenDRComputes")
	tempSlice := make([]types.Object, 0, len(items))
	for id, name := range items {
		temp := models.ItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes", diags))
	}

	tflog.Info(ctx, "End flattenDRComputes")
	return res
}

func diskListFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
	tflog.Info(ctx, "Start diskListFlattenSnapshots")
	tempSlice := make([]types.Object, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.DiskReplicationItemSnapshot{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.DiskReplicationSnapshot, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	// the list element type must match the attr map the objects were built from
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.DiskReplicationSnapshot}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots", diags))
	}

	tflog.Info(ctx, "End diskListFlattenSnapshots")
	return res
}
@@ -0,0 +1,50 @@
package flattens

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskSnapshotDataSource flattens the data source for a disk snapshot.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskSnapshotDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotDataSource")

	diskId := uint64(state.DiskID.ValueInt64())

	item, diags := utilities.DataSourceDiskSnapshotCheckPresence(ctx, state, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskSnapshotDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshot": item})

	id := uuid.New()
	*state = models.DataSourceDiskSnapshotModel{
		DiskID:   state.DiskID,
		Label:    state.Label,
		Timeouts: state.Timeouts,

		// computed fields
		Id:          types.StringValue(id.String()),
		GUID:        types.StringValue(item.GUID),
		ResID:       types.StringValue(item.ResID),
		SnapSetGUID: types.StringValue(item.SnapSetGUID),
		SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
		TimeStamp:   types.Int64Value(int64(item.Timestamp)),
	}

	tflog.Info(ctx, "flattens.DiskSnapshotDataSource: end flatten", map[string]any{
		"disk_id": state.DiskID.ValueInt64(),
		"label":   state.Label.ValueString(),
	})
	return nil
}
@@ -0,0 +1,61 @@
package flattens

import (
	"context"
	"fmt"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskSnapshotListDataSource flattens the data source for the disk snapshot list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged to tflog.
func DiskSnapshotListDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotListModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotListDataSource")

	diags := diag.Diagnostics{}

	diskId := uint64(state.DiskID.ValueInt64())

	snapshots, err := utilities.DiskSnapshotListCheckPresence(ctx, diskId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk snapshot list with disk ID %v", diskId), err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshots": snapshots})

	id := uuid.New()
	*state = models.DataSourceDiskSnapshotListModel{
		DiskID:   state.DiskID,
		Timeouts: state.Timeouts,

		// computed fields
		Id: types.StringValue(id.String()),
	}

	res := make([]models.DiskItemSnapshot, 0, len(*snapshots))
	for _, item := range *snapshots {
		temp := models.DiskItemSnapshot{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			ReferenceID: types.StringValue(item.ReferenceID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
		}
		res = append(res, temp)
	}

	state.Items = res

	tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()})
	return nil
}
@@ -0,0 +1,191 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskResource flattens the resource for a disk.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged to tflog.
func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskResource")

	diags := diag.Diagnostics{}

	diskId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("flattens.DiskResource: Cannot parse disk ID from state", err.Error())
		return diags
	}

	recordDisk, err := utilities.DiskCheckPresence(ctx, diskId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.DiskResource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk})

	diskAcl, _ := json.Marshal(recordDisk.ACL)

	*plan = models.ResourceDiskModel{
		// required fields
		AccountID: types.Int64Value(int64(recordDisk.AccountID)),
		DiskName:  types.StringValue(recordDisk.Name),
		SizeMax:   types.Int64Value(int64(recordDisk.SizeMax)),
		GID:       types.Int64Value(int64(recordDisk.GID)),

		// optional fields
		SSDSize:     plan.SSDSize,
		NodeIDs:     plan.NodeIDs,
		Description: plan.Description,
		Pool:        plan.Pool,
		SEPID:       plan.SEPID,
		Type:        plan.Type,
		Detach:      plan.Detach,
		Permanently: plan.Permanently,
		Shareable:   plan.Shareable,
		Timeouts:    plan.Timeouts,

		// computed fields
		LastUpdated:     plan.LastUpdated,
		Id:              types.StringValue(strconv.Itoa(int(recordDisk.ID))),
		DiskId:          types.Int64Value(int64(recordDisk.ID)),
		AccountName:     types.StringValue(recordDisk.AccountName),
		ACL:             types.StringValue(string(diskAcl)),
		Computes:        flattenResourceComputes(ctx, recordDisk.Computes),
		CreatedTime:     types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:     types.Int64Value(int64(recordDisk.DeletedTime)),
		DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
		DeviceName:      types.StringValue(recordDisk.DeviceName),
		ImageID:         types.Int64Value(int64(recordDisk.ImageID)),
		Order:           types.Int64Value(int64(recordDisk.Order)),
		Params:          types.StringValue(recordDisk.Params),
		ParentID:        types.Int64Value(int64(recordDisk.ParentID)),
		PCISlot:         types.Int64Value(int64(recordDisk.PCISlot)),
		PurgeTime:       types.Int64Value(int64(recordDisk.PurgeTime)),
		ResID:           types.StringValue(recordDisk.ResID),
		ResName:         types.StringValue(recordDisk.ResName),
		Role:            types.StringValue(recordDisk.Role),
		SepType:         types.StringValue(recordDisk.SEPType),
		SizeUsed:        types.Float64Value(recordDisk.SizeUsed),
		Snapshots:       flattenResourceDiskSnapshots(ctx, recordDisk.Snapshots),
		Status:          types.StringValue(recordDisk.Status),
		TechStatus:      types.StringValue(recordDisk.TechStatus),
		VMID:            types.Int64Value(int64(recordDisk.VMID)),
	}

	plan.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.Images to plan.Images", diags))
	}
	plan.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.PresentTo to plan.PresentTo", diags))
	}

	// Optional+Computed attributes left unknown in the plan are backfilled
	// from the platform response.
	if plan.Description.IsUnknown() {
		plan.Description = types.StringValue(recordDisk.Description)
	}
	if plan.Pool.IsUnknown() {
		plan.Pool = types.StringValue(recordDisk.Pool)
	}
	if plan.SEPID.IsUnknown() {
		plan.SEPID = types.Int64Value(int64(recordDisk.SEPID))
	}
	if plan.Shareable.IsUnknown() {
		plan.Shareable = types.BoolValue(recordDisk.Shareable)
	}
	if plan.Type.IsUnknown() {
		plan.Type = types.StringValue(recordDisk.Type)
	}

	iotune := models.ResourceIOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskResource iotune struct to obj", diags))
	}
	plan.IOTune = obj

	tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{"disk_id": plan.Id.ValueString()})

	tflog.Info(ctx, "End flattens.DiskResource")
	return nil
}

func flattenResourceComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenResourceComputes")
	tempSlice := make([]types.Object, 0, len(items))
	for id, name := range items {
		temp := models.ItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes", diags))
	}

	tflog.Info(ctx, "End flattenResourceComputes")
	return res
}

func flattenResourceDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
	tflog.Info(ctx, "Start flattenResourceDiskSnapshots")
	tempSlice := make([]types.Object, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.ItemSnapshotsModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshots, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshots}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots", diags))
	}

	tflog.Info(ctx, "End flattenResourceDiskSnapshots")
	return res
}
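The IsUnknown backfill in DiskResource above follows a plugin-framework rule: Terraform forbids unknown values in saved state after apply, so Optional+Computed attributes the practitioner left unset must be filled from the API response. A hedged generic illustration of the rule follows; fillIfUnknown is a hypothetical helper, not part of this commit.

// Hedged sketch, not part of this commit: the Optional+Computed backfill
// pattern used in DiskResource, extracted into a helper for one attribute type.
func fillIfUnknown(planned types.String, apiValue string) types.String {
	if planned.IsUnknown() {
		// Unknown values may not be written to state, so take the value
		// reported by the platform.
		return types.StringValue(apiValue)
	}
	return planned
}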
@@ -0,0 +1,184 @@
package flattens

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskReplicationResource flattens the resource for a replicated disk.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged to tflog.
func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskReplicationResource")

	diags := diag.Diagnostics{}

	recordDisk, status, err := utilities.ResourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about disk", err.Error())
		return diags
	}

	tflog.Info(ctx, "flattens.DiskReplicationResource: before flatten")

	diskAcl, _ := json.Marshal(recordDisk.ACL)

	*state = models.ResourceRecordDiskReplicationModel{
		DiskId:              state.DiskId,
		Name:                state.Name,
		SepID:               state.SepID,
		ReplicationId:       state.ReplicationId,
		Timeouts:            state.Timeouts,
		PoolName:            state.PoolName,
		Pause:               state.Pause,
		Reverse:             state.Reverse,
		Start:               state.Start,
		Detach:              state.Detach,
		Permanently:         state.Permanently,
		Id:                  types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
		ACL:                 types.StringValue(string(diskAcl)),
		BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
		AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
		AccountName:         types.StringValue(recordDisk.AccountName),
		Computes:            flattenRComputes(ctx, recordDisk.Computes),
		CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
		DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
		DeviceName:          types.StringValue(recordDisk.DeviceName),
		DiskPath:            types.StringValue(recordDisk.DiskPath),
		Description:         types.StringValue(recordDisk.Description),
		DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
		GUID:                types.Int64Value(int64(recordDisk.GUID)),
		GID:                 types.Int64Value(int64(recordDisk.GID)),
		ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
		Images:              flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
		IQN:                 types.StringValue(recordDisk.IQN),
		Login:               types.StringValue(recordDisk.Login),
		Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
		Order:               types.Int64Value(int64(recordDisk.Order)),
		Params:              types.StringValue(recordDisk.Params),
		ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
		Passwd:              types.StringValue(recordDisk.Password),
		PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
		Pool:                types.StringValue(recordDisk.Pool),
		PresentTo:           flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
		PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
		PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
		RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
		ReferenceID:         types.StringValue(recordDisk.ReferenceID),
		ResID:               types.StringValue(recordDisk.ResID),
		ResName:             types.StringValue(recordDisk.ResName),
		Role:                types.StringValue(recordDisk.Role),
		SepType:             types.StringValue(recordDisk.SEPType),
		Shareable:           types.BoolValue(recordDisk.Shareable),
		SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
		SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
		Snapshots:           replicationFlattenSnapshots(ctx, recordDisk.Snapshots),
		Status:              types.StringValue(recordDisk.Status),
		StatusReplication:   types.StringValue(*status),
		TechStatus:          types.StringValue(recordDisk.TechStatus),
		Type:                types.StringValue(recordDisk.Type),
		VMID:                types.Int64Value(int64(recordDisk.VMID)),
	}

	iotune := models.ResourceDiskReplicationIOTuneModel{
		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
	}
	obj, diags := types.ObjectValueFrom(ctx, models.ResourceDiskReplicationIOTune, iotune)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskReplicationResource iotune struct to obj", diags))
	}
	state.IOTune = obj

	itemReplication := models.ResourceItemReplicationModel{
		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
		Role:         types.StringValue(recordDisk.Replication.Role),
		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
	}

	obj, diags = types.ObjectValueFrom(ctx, models.ResourceItemReplication, itemReplication)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskReplicationResource Replication struct to obj", diags))
	}
	state.Replication = obj

	tflog.Info(ctx, "flattens.DiskReplicationResource: end flatten")
	return nil
}

func flattenRComputes(ctx context.Context, items map[string]string) types.List {
	tflog.Info(ctx, "Start flattenRComputes")
	tempSlice := make([]types.Object, 0, len(items))
	for id, name := range items {
		temp := models.ResourceReplicationItemComputeModel{
			ComputeId:   types.StringValue(id),
			ComputeName: types.StringValue(name),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemCompute, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error flattenRComputes struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	// the list element type must match the attr map the objects were built from
	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemCompute}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenRComputes", diags))
	}

	tflog.Info(ctx, "End flattenRComputes")
	return res
}

func replicationFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
	tflog.Info(ctx, "Start replicationFlattenSnapshots")
	tempSlice := make([]types.Object, 0, len(snapshots))
	for _, item := range snapshots {
		temp := models.ResourceReplicationItemSnapshotModel{
			GUID:        types.StringValue(item.GUID),
			Label:       types.StringValue(item.Label),
			ResID:       types.StringValue(item.ResID),
			SnapSetGUID: types.StringValue(item.SnapSetGUID),
			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
		}
		obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemSnapshot, temp)
		if diags != nil {
			tflog.Error(ctx, fmt.Sprint("Error replicationFlattenSnapshots struct to obj", diags))
		}
		tempSlice = append(tempSlice, obj)
	}

	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemSnapshot}, tempSlice)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error replicationFlattenSnapshots", diags))
	}

	tflog.Info(ctx, "End replicationFlattenSnapshots")
	return res
}
@@ -0,0 +1,65 @@
package flattens

import (
	"context"
	"fmt"
	"strings"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// DiskSnapshotResource flattens the resource for a disk snapshot.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged to tflog.
func DiskSnapshotResource(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DiskSnapshotResource", map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString()})

	recordSnapshot, diags := utilities.DiskSnapshotCheckPresence(ctx, plan, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DiskSnapshotResource: before flatten", map[string]any{
		"disk_id":        plan.DiskID.ValueInt64(),
		"label":          plan.Label.ValueString(),
		"recordSnapshot": recordSnapshot})

	// The Terraform ID of a snapshot is the composite "<disk_id>#<label>".
	id := plan.Id
	if !strings.Contains(id.ValueString(), "#") {
		id = types.StringValue(fmt.Sprintf("%d#%s", plan.DiskID.ValueInt64(), plan.Label.ValueString()))
	}
	*plan = models.ResourceDiskSnapshotModel{
		// required fields
		DiskID: plan.DiskID,
		Label:  types.StringValue(recordSnapshot.Label),

		// optional fields
		Rollback:  plan.Rollback,
		TimeStamp: plan.TimeStamp,
		Timeouts:  plan.Timeouts,

		// computed fields
		Id:          id,
		GUID:        types.StringValue(recordSnapshot.GUID),
		ResID:       types.StringValue(recordSnapshot.ResID),
		SnapSetGUID: types.StringValue(recordSnapshot.SnapSetGUID),
		SnapSetTime: types.Int64Value(int64(recordSnapshot.SnapSetTime)),
	}

	if plan.TimeStamp.IsUnknown() {
		plan.TimeStamp = types.Int64Value(int64(recordSnapshot.Timestamp))
	}

	tflog.Info(ctx, "flattens.DiskSnapshotResource: after flatten", map[string]any{
		"disk_id": plan.DiskID.ValueInt64(),
		"label":   plan.Label.ValueString()})

	return nil
}
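Since DiskSnapshotResource synthesizes its Terraform ID as "<disk_id>#<label>", the inverse parse is what an import or read path would need. A hedged sketch follows; parseSnapshotID is a hypothetical helper, not part of this commit, and it assumes "strconv" alongside the imports above.

// Hedged sketch, not part of this commit: split a "diskID#label" composite
// ID back into its parts.
func parseSnapshotID(id string) (uint64, string, error) {
	parts := strings.SplitN(id, "#", 2)
	if len(parts) != 2 {
		return 0, "", fmt.Errorf("expected ID in the form \"diskID#label\", got %q", id)
	}
	diskID, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return 0, "", fmt.Errorf("invalid disk ID %q: %w", parts[0], err)
	}
	return diskID, parts[1], nil
}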
122
internal/service/cloudbroker/disks/input_checks.go
Normal file
@@ -0,0 +1,122 @@
package cbDisks

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
)

// resourceDiskReplicationInputChecks checks that the user-provided disk_id exists on the platform before disk replication.
func resourceDiskReplicationInputChecks(ctx context.Context, plan *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	diskId := uint64(plan.DiskId.ValueInt64())
	tflog.Info(ctx, "resourceDiskReplicationInputChecks: exist disk check", map[string]any{"disk_id": diskId})
	err := CheckTatlinDiskID(ctx, diskId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
	}

	return diags
}

// resourceDiskUpdateInputChecks checks that the user-provided
// account_id and gid exist on the platform, and that
// description, pool, sep_id and type are not being changed.
func resourceDiskUpdateInputChecks(ctx context.Context, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	// check accountId and gid
	diags.Append(resourceDiskCreateInputChecks(ctx, plan, c)...)

	// check description
	if !plan.Description.Equal(state.Description) && !plan.Description.IsUnknown() {
		diags.AddError(
			"resourceDiskUpdateInputChecks: description change is not allowed",
			fmt.Sprintf("cannot change description from %s to %s for disk id %s",
				state.Description.ValueString(),
				plan.Description.ValueString(),
				plan.Id.ValueString()))
	}

	// check pool
	if !plan.Pool.Equal(state.Pool) && !plan.Pool.IsUnknown() {
		diags.AddError(
			"resourceDiskUpdateInputChecks: pool change is not allowed",
			fmt.Sprintf("cannot change pool from %s to %s for disk id %s",
				state.Pool.ValueString(),
				plan.Pool.ValueString(),
				plan.Id.ValueString()))
	}

	// check sep_id
	if !plan.SEPID.Equal(state.SEPID) && !plan.SEPID.IsUnknown() {
		diags.AddError(
			"resourceDiskUpdateInputChecks: sep_id change is not allowed",
			fmt.Sprintf("cannot change sep_id from %d to %d for disk id %s",
				state.SEPID.ValueInt64(),
				plan.SEPID.ValueInt64(),
				plan.Id.ValueString()))
	}

	// check type
	if !plan.Type.Equal(state.Type) && !plan.Type.IsUnknown() {
		diags.AddError(
			"resourceDiskUpdateInputChecks: type change is not allowed",
			fmt.Sprintf("cannot change type from %s to %s for disk id %s",
				state.Type.ValueString(),
				plan.Type.ValueString(),
				plan.Id.ValueString()))
	}

	return diags
}

// resourceDiskCreateInputChecks checks that the user-provided account_id and gid exist on the platform before disk creation.
func resourceDiskCreateInputChecks(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	accountId := uint64(plan.AccountID.ValueInt64())
	tflog.Info(ctx, "resourceDiskCreateInputChecks: exist account check", map[string]any{"account_id": accountId})
	err := ic.ExistAccount(ctx, accountId, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error())
	}

	gid := uint64(plan.GID.ValueInt64())
	tflog.Info(ctx, "resourceDiskCreateInputChecks: exist gid check", map[string]any{"gid": gid})
	err = ic.ExistGID(ctx, gid, c)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about GID %v", gid), err.Error())
	}
	return diags
}

// CheckTatlinDiskID verifies that the disk exists and resides on a TATLIN SEP,
// which is a prerequisite for replication.
func CheckTatlinDiskID(ctx context.Context, diskId uint64, c *decort.DecortClient) error {
	req := disks.ListRequest{
		ByID: diskId,
	}

	diskList, err := c.CloudBroker().Disks().List(ctx, req)
	if err != nil {
		return err
	}

	if len(diskList.Data) == 0 {
		return fmt.Errorf("disk with ID %d does not exist", diskId)
	}

	if diskList.Data[0].SEPType != "TATLIN" {
		return fmt.Errorf("disk with ID %d is not on a TATLIN SEP, replication is not allowed", diskId)
	}

	return nil
}
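CheckTatlinDiskID funnels both the existence check and the SEP-type check through a single Disks().List call filtered by ID, so replication input validation is one round trip. A hedged usage sketch follows; ensureReplicable is a hypothetical wrapper, not part of this commit.

// Hedged sketch, not part of this commit: gate a replication action on the
// TATLIN check.
func ensureReplicable(ctx context.Context, diskID uint64, c *decort.DecortClient) error {
	if err := CheckTatlinDiskID(ctx, diskID, c); err != nil {
		// Refused both for missing disks and for disks whose SEP type is
		// not TATLIN.
		return err
	}
	return nil
}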
@@ -0,0 +1,61 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

type DiskModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// computed fields
	MachineID           types.Int64     `tfsdk:"machine_id"`
	MachineName         types.String    `tfsdk:"machine_name"`
	DeviceName          types.String    `tfsdk:"devicename"`
	SEPType             types.String    `tfsdk:"sep_type"`
	AccountID           types.Int64     `tfsdk:"account_id"`
	AccountName         types.String    `tfsdk:"account_name"`
	ACL                 types.String    `tfsdk:"acl"`
	BootPartition       types.Int64     `tfsdk:"boot_partition"`
	Computes            []ComputeModel  `tfsdk:"computes"`
	CreatedTime         types.Int64     `tfsdk:"created_time"`
	DeletedTime         types.Int64     `tfsdk:"deleted_time"`
	Description         types.String    `tfsdk:"desc"`
	DestructionTime     types.Int64     `tfsdk:"destruction_time"`
	DiskPath            types.String    `tfsdk:"disk_path"`
	GID                 types.Int64     `tfsdk:"gid"`
	GUID                types.Int64     `tfsdk:"guid"`
	ImageID             types.Int64     `tfsdk:"image_id"`
	Images              types.List      `tfsdk:"images"`
	IOTune              *IOModel        `tfsdk:"iotune"`
	IQN                 types.String    `tfsdk:"iqn"`
	Login               types.String    `tfsdk:"login"`
	Milestones          types.Int64     `tfsdk:"milestones"`
	Name                types.String    `tfsdk:"disk_name"`
	Order               types.Int64     `tfsdk:"order"`
	Params              types.String    `tfsdk:"params"`
	ParentID            types.Int64     `tfsdk:"parent_id"`
	Password            types.String    `tfsdk:"passwd"`
	PCISlot             types.Int64     `tfsdk:"pci_slot"`
	Pool                types.String    `tfsdk:"pool"`
	PresentTo           types.List      `tfsdk:"present_to"`
	PurgeAttempts       types.Int64     `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64     `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64     `tfsdk:"reality_device_number"`
	ReferenceID         types.String    `tfsdk:"reference_id"`
	Replication         *ItemReplModel  `tfsdk:"replication"`
	ResID               types.String    `tfsdk:"res_id"`
	ResName             types.String    `tfsdk:"res_name"`
	Role                types.String    `tfsdk:"role"`
	SEPID               types.Int64     `tfsdk:"sep_id"`
	Shareable           types.Bool      `tfsdk:"shareable"`
	SizeMax             types.Int64     `tfsdk:"size_max"`
	SizeUsed            types.Float64   `tfsdk:"size_used"`
	Snapshots           []SnapshotModel `tfsdk:"snapshots"`
	Status              types.String    `tfsdk:"status"`
	TechStatus          types.String    `tfsdk:"tech_status"`
	Type                types.String    `tfsdk:"type"`
	VMID                types.Int64     `tfsdk:"vmid"`
}
@@ -0,0 +1,116 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

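// DisksModel describes the disk list data source: filter and paging request
// fields plus the resulting items.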
type DisksModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// computed fields
	Items      []DiskItemModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

type DiskItemModel struct {
	MachineID           types.Int64     `tfsdk:"machine_id"`
	MachineName         types.String    `tfsdk:"machine_name"`
	DeviceName          types.String    `tfsdk:"devicename"`
	SEPType             types.String    `tfsdk:"sep_type"`
	AccountID           types.Int64     `tfsdk:"account_id"`
	AccountName         types.String    `tfsdk:"account_name"`
	ACL                 types.String    `tfsdk:"acl"`
	BootPartition       types.Int64     `tfsdk:"boot_partition"`
	Computes            []ComputeModel  `tfsdk:"computes"`
	CreatedTime         types.Int64     `tfsdk:"created_time"`
	DeletedTime         types.Int64     `tfsdk:"deleted_time"`
	Description         types.String    `tfsdk:"desc"`
	DestructionTime     types.Int64     `tfsdk:"destruction_time"`
	DiskPath            types.String    `tfsdk:"disk_path"`
	GID                 types.Int64     `tfsdk:"gid"`
	GUID                types.Int64     `tfsdk:"guid"`
	ID                  types.Int64     `tfsdk:"disk_id"`
	ImageID             types.Int64     `tfsdk:"image_id"`
	Images              types.List      `tfsdk:"images"`
	IOTune              *IOModel        `tfsdk:"iotune"`
	IQN                 types.String    `tfsdk:"iqn"`
	Login               types.String    `tfsdk:"login"`
	Milestones          types.Int64     `tfsdk:"milestones"`
	Name                types.String    `tfsdk:"disk_name"`
	Order               types.Int64     `tfsdk:"order"`
	Params              types.String    `tfsdk:"params"`
	ParentID            types.Int64     `tfsdk:"parent_id"`
	Password            types.String    `tfsdk:"passwd"`
	PCISlot             types.Int64     `tfsdk:"pci_slot"`
	Pool                types.String    `tfsdk:"pool"`
	PresentTo           types.List      `tfsdk:"present_to"`
	PurgeAttempts       types.Int64     `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64     `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64     `tfsdk:"reality_device_number"`
	ReferenceID         types.String    `tfsdk:"reference_id"`
	Replication         *ItemReplModel  `tfsdk:"replication"`
	ResID               types.String    `tfsdk:"res_id"`
	ResName             types.String    `tfsdk:"res_name"`
	Role                types.String    `tfsdk:"role"`
	SEPID               types.Int64     `tfsdk:"sep_id"`
	Shareable           types.Bool      `tfsdk:"shareable"`
	SizeMax             types.Int64     `tfsdk:"size_max"`
	SizeUsed            types.Float64   `tfsdk:"size_used"`
	Snapshots           []SnapshotModel `tfsdk:"snapshots"`
	Status              types.String    `tfsdk:"status"`
	TechStatus          types.String    `tfsdk:"tech_status"`
	Type                types.String    `tfsdk:"type"`
	VMID                types.Int64     `tfsdk:"vmid"`
}

type ComputeModel struct {
	ComputeID   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

type ItemReplModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}

type IOModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

type SnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	Timestamp   types.Int64  `tfsdk:"timestamp"`
}
@@ -0,0 +1,116 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

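// ListDisksModel describes a disk list data source model with the same filter
// and paging request fields, backed by its own item types.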
type ListDisksModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// computed fields
	Items      []ItemDiskModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

type ItemDiskModel struct {
	MachineID           types.Int64           `tfsdk:"machine_id"`
	MachineName         types.String          `tfsdk:"machine_name"`
	DeviceName          types.String          `tfsdk:"devicename"`
	SEPType             types.String          `tfsdk:"sep_type"`
	AccountID           types.Int64           `tfsdk:"account_id"`
	AccountName         types.String          `tfsdk:"account_name"`
	ACL                 types.String          `tfsdk:"acl"`
	BootPartition       types.Int64           `tfsdk:"boot_partition"`
	Computes            []ComputesModel       `tfsdk:"computes"`
	CreatedTime         types.Int64           `tfsdk:"created_time"`
	DeletedTime         types.Int64           `tfsdk:"deleted_time"`
	Description         types.String          `tfsdk:"desc"`
	DestructionTime     types.Int64           `tfsdk:"destruction_time"`
	DiskPath            types.String          `tfsdk:"disk_path"`
	GID                 types.Int64           `tfsdk:"gid"`
	GUID                types.Int64           `tfsdk:"guid"`
	ID                  types.Int64           `tfsdk:"disk_id"`
	ImageID             types.Int64           `tfsdk:"image_id"`
	Images              types.List            `tfsdk:"images"`
	IOTune              *IOTuneModel          `tfsdk:"iotune"`
	IQN                 types.String          `tfsdk:"iqn"`
	Login               types.String          `tfsdk:"login"`
	Milestones          types.Int64           `tfsdk:"milestones"`
	Name                types.String          `tfsdk:"disk_name"`
	Order               types.Int64           `tfsdk:"order"`
	Params              types.String          `tfsdk:"params"`
	ParentID            types.Int64           `tfsdk:"parent_id"`
	Password            types.String          `tfsdk:"passwd"`
	PCISlot             types.Int64           `tfsdk:"pci_slot"`
	Pool                types.String          `tfsdk:"pool"`
	PresentTo           types.List            `tfsdk:"present_to"`
	PurgeAttempts       types.Int64           `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64           `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64           `tfsdk:"reality_device_number"`
	ReferenceID         types.String          `tfsdk:"reference_id"`
	Replication         *ItemReplicationModel `tfsdk:"replication"`
	ResID               types.String          `tfsdk:"res_id"`
	ResName             types.String          `tfsdk:"res_name"`
	Role                types.String          `tfsdk:"role"`
	SEPID               types.Int64           `tfsdk:"sep_id"`
	Shareable           types.Bool            `tfsdk:"shareable"`
	SizeMax             types.Int64           `tfsdk:"size_max"`
	SizeUsed            types.Float64         `tfsdk:"size_used"`
	Snapshots           []ItemSnapshotModel   `tfsdk:"snapshots"`
	Status              types.String          `tfsdk:"status"`
	TechStatus          types.String          `tfsdk:"tech_status"`
	Type                types.String          `tfsdk:"type"`
	VMID                types.Int64           `tfsdk:"vmid"`
}

type ComputesModel struct {
	ComputeID   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

type ItemReplicationModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}

type IOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

type ItemSnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	Timestamp   types.Int64  `tfsdk:"timestamp"`
}
@@ -0,0 +1,19 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

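// DataSourceDiskListTypesModel describes the disk types list data source.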
type DataSourceDiskListTypesModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String `tfsdk:"id"`
	Items      types.List   `tfsdk:"items"`
	EntryCount types.Int64  `tfsdk:"entry_count"`
}
@@ -0,0 +1,31 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

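// DataSourceDiskListTypesDetailedModel describes the detailed disk types list
// data source, grouped by SEP and pool.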
type DataSourceDiskListTypesDetailedModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String                `tfsdk:"id"`
	Items      []ItemDiskTypeDetailedModel `tfsdk:"items"`
	EntryCount types.Int64                 `tfsdk:"entry_count"`
}

type ItemDiskTypeDetailedModel struct {
	Pools   []ItemPoolModel `tfsdk:"pools"`
	SepID   types.Int64     `tfsdk:"sep_id"`
	SepName types.String    `tfsdk:"sep_name"`
}

type ItemPoolModel struct {
	Name   types.String `tfsdk:"name"`
	System types.String `tfsdk:"system"`
	Types  types.List   `tfsdk:"types"`
}
@@ -0,0 +1,109 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

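// DataSourceDiskListUnattachedModel describes the unattached disk list data source.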
type DataSourceDiskListUnattachedModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Status      types.String   `tfsdk:"status"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	SepID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String              `tfsdk:"id"`
	Items      []ItemDiskUnattachedModel `tfsdk:"items"`
	EntryCount types.Int64               `tfsdk:"entry_count"`
}

type ItemDiskUnattachedModel struct {
	CKey                types.String  `tfsdk:"ckey"`
	Meta                types.List    `tfsdk:"meta"`
	AccountID           types.Int64   `tfsdk:"account_id"`
	AccountName         types.String  `tfsdk:"account_name"`
	ACL                 types.String  `tfsdk:"acl"`
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	Description         types.String  `tfsdk:"desc"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	GID                 types.Int64   `tfsdk:"gid"`
	GUID                types.Int64   `tfsdk:"guid"`
	DiskId              types.Int64   `tfsdk:"disk_id"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	Images              types.List    `tfsdk:"images"`
	IOTune              types.Object  `tfsdk:"iotune"`
	Iqn                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	DiskName            types.String  `tfsdk:"disk_name"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	Pool                types.String  `tfsdk:"pool"`
	PresentTo           types.List    `tfsdk:"present_to"`
	PurgeAttempts       types.Int64   `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	ResID               types.String  `tfsdk:"res_id"`
	ResName             types.String  `tfsdk:"res_name"`
	Role                types.String  `tfsdk:"role"`
	SepID               types.Int64   `tfsdk:"sep_id"`
	Shareable           types.Bool    `tfsdk:"shareable"`
	SizeMax             types.Int64   `tfsdk:"size_max"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	Type                types.String  `tfsdk:"type"`
	VMID                types.Int64   `tfsdk:"vmid"`
}

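// ItemIOTune maps iotune attribute names to their Terraform types for building
// the iotune object value.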
var ItemIOTune = map[string]attr.Type{
	"read_bytes_sec":      types.Int64Type,
	"read_bytes_sec_max":  types.Int64Type,
	"read_iops_sec":       types.Int64Type,
	"read_iops_sec_max":   types.Int64Type,
	"size_iops_sec":       types.Int64Type,
	"total_bytes_sec":     types.Int64Type,
	"total_bytes_sec_max": types.Int64Type,
	"total_iops_sec":      types.Int64Type,
	"total_iops_sec_max":  types.Int64Type,
	"write_bytes_sec":     types.Int64Type,
	"write_bytes_sec_max": types.Int64Type,
	"write_iops_sec":      types.Int64Type,
	"write_iops_sec_max":  types.Int64Type,
}

type DiskUnattachedItemSnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	Timestamp   types.Int64  `tfsdk:"timestamp"`
}

var ItemSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}
@@ -0,0 +1,115 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

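// RecordDiskModel describes a single disk record, including its replication status.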
type RecordDiskModel struct {
	// request fields - required
	DiskId   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	ACL                 types.String           `tfsdk:"acl"`
	BootPartition       types.Int64            `tfsdk:"boot_partition"`
	AccountID           types.Int64            `tfsdk:"account_id"`
	AccountName         types.String           `tfsdk:"account_name"`
	Computes            types.List             `tfsdk:"computes"`
	CreatedTime         types.Int64            `tfsdk:"created_time"`
	DeletedTime         types.Int64            `tfsdk:"deleted_time"`
	DeviceName          types.String           `tfsdk:"devicename"`
	DiskPath            types.String           `tfsdk:"disk_path"`
	Description         types.String           `tfsdk:"desc"`
	DestructionTime     types.Int64            `tfsdk:"destruction_time"`
	GID                 types.Int64            `tfsdk:"gid"`
	GUID                types.Int64            `tfsdk:"guid"`
	ImageID             types.Int64            `tfsdk:"image_id"`
	Images              types.List             `tfsdk:"images"`
	IOTune              *DiskReplicationIOTune `tfsdk:"iotune"`
	IQN                 types.String           `tfsdk:"iqn"`
	Login               types.String           `tfsdk:"login"`
	Milestones          types.Int64            `tfsdk:"milestones"`
	Name                types.String           `tfsdk:"disk_name"`
	Order               types.Int64            `tfsdk:"order"`
	Params              types.String           `tfsdk:"params"`
	ParentID            types.Int64            `tfsdk:"parent_id"`
	Passwd              types.String           `tfsdk:"passwd"`
	PCISlot             types.Int64            `tfsdk:"pci_slot"`
	Pool                types.String           `tfsdk:"pool"`
	PresentTo           types.List             `tfsdk:"present_to"`
	PurgeAttempts       types.Int64            `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64            `tfsdk:"purge_time"`
	Replication         *ItemReplicationModel  `tfsdk:"replication"`
	RealityDeviceNumber types.Int64            `tfsdk:"reality_device_number"`
	ReferenceID         types.String           `tfsdk:"reference_id"`
	ResID               types.String           `tfsdk:"res_id"`
	ResName             types.String           `tfsdk:"res_name"`
	Role                types.String           `tfsdk:"role"`
	SepType             types.String           `tfsdk:"sep_type"`
	SepID               types.Int64            `tfsdk:"sep_id"`
	Shareable           types.Bool             `tfsdk:"shareable"`
	SizeMax             types.Int64            `tfsdk:"size_max"`
	SizeUsed            types.Float64          `tfsdk:"size_used"`
	Snapshots           types.List             `tfsdk:"snapshots"`
	Status              types.String           `tfsdk:"status"`
	StatusReplication   types.String           `tfsdk:"status_replication"`
	TechStatus          types.String           `tfsdk:"tech_status"`
	Type                types.String           `tfsdk:"type"`
	VMID                types.Int64            `tfsdk:"vmid"`
}

type DiskReplicationIOTune struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

type ReplicationModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}

type DiskReplicationItemSnapshot struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

var DiskReplicationSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}

type ItemComputeModel struct {
	ComputeId   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

var ItemCompute = map[string]attr.Type{
	"compute_id":   types.StringType,
	"compute_name": types.StringType,
}
@@ -0,0 +1,21 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

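// DataSourceDiskSnapshotModel describes the disk snapshot data source.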
type DataSourceDiskSnapshotModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Label    types.String   `tfsdk:"label"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id          types.String `tfsdk:"id"`
	GUID        types.String `tfsdk:"guid"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}
@@ -0,0 +1,26 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

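// DataSourceDiskSnapshotListModel describes the disk snapshot list data source.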
type DataSourceDiskSnapshotListModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id    types.String       `tfsdk:"id"`
	Items []DiskItemSnapshot `tfsdk:"items"`
}

type DiskItemSnapshot struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}
@@ -0,0 +1,127 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

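// ResourceDiskModel describes the disk resource: required and optional request
// fields plus all computed response attributes.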
type ResourceDiskModel struct {
	// request fields - required
	AccountID types.Int64  `tfsdk:"account_id"`
	DiskName  types.String `tfsdk:"disk_name"`
	SizeMax   types.Int64  `tfsdk:"size_max"`
	GID       types.Int64  `tfsdk:"gid"`

	// request fields - optional
	SSDSize     types.Int64    `tfsdk:"ssd_size"`
	NodeIDs     types.List     `tfsdk:"node_ids"`
	Description types.String   `tfsdk:"desc"`
	Pool        types.String   `tfsdk:"pool"`
	SEPID       types.Int64    `tfsdk:"sep_id"`
	Type        types.String   `tfsdk:"type"`
	Detach      types.Bool     `tfsdk:"detach"`
	Permanently types.Bool     `tfsdk:"permanently"`
	Shareable   types.Bool     `tfsdk:"shareable"`
	IOTune      types.Object   `tfsdk:"iotune"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id                  types.String  `tfsdk:"id"`
	LastUpdated         types.String  `tfsdk:"last_updated"`
	ACL                 types.String  `tfsdk:"acl"`
	AccountName         types.String  `tfsdk:"account_name"`
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	Computes            types.List    `tfsdk:"computes"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	DeviceName          types.String  `tfsdk:"devicename"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	DiskId              types.Int64   `tfsdk:"disk_id"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	Guid                types.Int64   `tfsdk:"guid"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	Images              types.List    `tfsdk:"images"`
	IQN                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	PresentTo           types.List    `tfsdk:"present_to"`
	PurgeAttempts       types.Int64   `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	ResID               types.String  `tfsdk:"res_id"`
	ResName             types.String  `tfsdk:"res_name"`
	Role                types.String  `tfsdk:"role"`
	SepType             types.String  `tfsdk:"sep_type"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	VMID                types.Int64   `tfsdk:"vmid"`
}

type ResourceIOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

type ItemSnapshotsModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

type ItemComputesModel struct {
	ComputeId   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

var ItemComputes = map[string]attr.Type{
	"compute_id":   types.StringType,
	"compute_name": types.StringType,
}

var ItemSnapshots = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}

var ResourceItemIOTune = map[string]attr.Type{
	"read_bytes_sec":      types.Int64Type,
	"read_bytes_sec_max":  types.Int64Type,
	"read_iops_sec":       types.Int64Type,
	"read_iops_sec_max":   types.Int64Type,
	"size_iops_sec":       types.Int64Type,
	"total_bytes_sec":     types.Int64Type,
	"total_bytes_sec_max": types.Int64Type,
	"total_iops_sec":      types.Int64Type,
	"total_iops_sec_max":  types.Int64Type,
	"write_bytes_sec":     types.Int64Type,
	"write_bytes_sec_max": types.Int64Type,
	"write_iops_sec":      types.Int64Type,
	"write_iops_sec_max":  types.Int64Type,
}
@@ -0,0 +1,147 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

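// ResourceRecordDiskReplicationModel describes the disk replication resource.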
type ResourceRecordDiskReplicationModel struct {
	// request fields
	DiskId      types.Int64    `tfsdk:"disk_id"`
	Name        types.String   `tfsdk:"disk_name"`
	SepID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	Pause       types.Bool     `tfsdk:"pause"`
	Reverse     types.Bool     `tfsdk:"reverse"`
	Start       types.Bool     `tfsdk:"start"`
	Detach      types.Bool     `tfsdk:"detach"`
	Permanently types.Bool     `tfsdk:"permanently"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id                  types.String  `tfsdk:"id"`
	ACL                 types.String  `tfsdk:"acl"`
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	AccountID           types.Int64   `tfsdk:"account_id"`
	AccountName         types.String  `tfsdk:"account_name"`
	Computes            types.List    `tfsdk:"computes"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	DeviceName          types.String  `tfsdk:"devicename"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	Description         types.String  `tfsdk:"desc"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	GID                 types.Int64   `tfsdk:"gid"`
	GUID                types.Int64   `tfsdk:"guid"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	ReplicationId       types.Int64   `tfsdk:"replica_disk_id"`
	Images              types.List    `tfsdk:"images"`
	IOTune              types.Object  `tfsdk:"iotune"`
	IQN                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	Pool                types.String  `tfsdk:"pool"`
	PresentTo           types.List    `tfsdk:"present_to"`
	PurgeAttempts       types.Int64   `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	Replication         types.Object  `tfsdk:"replication"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	ResID               types.String  `tfsdk:"res_id"`
	ResName             types.String  `tfsdk:"res_name"`
	Role                types.String  `tfsdk:"role"`
	SepType             types.String  `tfsdk:"sep_type"`
	Shareable           types.Bool    `tfsdk:"shareable"`
	SizeMax             types.Int64   `tfsdk:"size_max"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	StatusReplication   types.String  `tfsdk:"status_replication"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	Type                types.String  `tfsdk:"type"`
	VMID                types.Int64   `tfsdk:"vmid"`
}

type ResourceDiskReplicationIOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

var ResourceDiskReplicationIOTune = map[string]attr.Type{
	"read_bytes_sec":      types.Int64Type,
	"read_bytes_sec_max":  types.Int64Type,
	"read_iops_sec":       types.Int64Type,
	"read_iops_sec_max":   types.Int64Type,
	"size_iops_sec":       types.Int64Type,
	"total_bytes_sec":     types.Int64Type,
	"total_bytes_sec_max": types.Int64Type,
	"total_iops_sec":      types.Int64Type,
	"total_iops_sec_max":  types.Int64Type,
	"write_bytes_sec":     types.Int64Type,
	"write_bytes_sec_max": types.Int64Type,
	"write_iops_sec":      types.Int64Type,
	"write_iops_sec_max":  types.Int64Type,
}

type ResourceItemReplicationModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}

var ResourceItemReplication = map[string]attr.Type{
	"disk_id":        types.Int64Type,
	"pool_id":        types.StringType,
	"role":           types.StringType,
	"self_volume_id": types.StringType,
	"storage_id":     types.StringType,
	"volume_id":      types.StringType,
}

type ResourceReplicationItemSnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

var ResourceReplicationItemSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}

type ResourceReplicationItemComputeModel struct {
	ComputeId   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

var ResourceReplicationItemCompute = map[string]attr.Type{
	"compute_id":   types.StringType,
	"compute_name": types.StringType,
}
@@ -0,0 +1,25 @@
package models

import (
	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

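// ResourceDiskSnapshotModel describes the disk snapshot resource.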
type ResourceDiskSnapshotModel struct {
	// request fields - required
	DiskID types.Int64  `tfsdk:"disk_id"`
	Label  types.String `tfsdk:"label"`

	// request fields - optional
	Rollback  types.Bool     `tfsdk:"rollback"`
	TimeStamp types.Int64    `tfsdk:"timestamp"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id          types.String `tfsdk:"id"`
	LastUpdated types.String `tfsdk:"last_updated"`
	GUID        types.String `tfsdk:"guid"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
}
370
internal/service/cloudbroker/disks/resource_cb_disk.go
Normal file
370
internal/service/cloudbroker/disks/resource_cb_disk.go
Normal file
@@ -0,0 +1,370 @@
package cbDisks

import (
	"context"
	"reflect"
	"strconv"
	"time"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &resourceDisk{}
	_ resource.ResourceWithImportState = &resourceDisk{}
)

// NewResourceDisk is a helper function to simplify the provider implementation.
func NewResourceDisk() resource.Resource {
	return &resourceDisk{}
}

// resourceDisk is the resource implementation.
type resourceDisk struct {
	client *decort.DecortClient
}

// Create creates the resource and sets the initial Terraform state.
func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error receiving the plan")
		return
	}

	contextCreateMap := map[string]any{
		"account_id": plan.AccountID.ValueInt64(),
		"disk_name":  plan.DiskName.ValueString(),
		"size_max":   plan.SizeMax.ValueInt64(),
		"gid":        plan.GID.ValueInt64(),
	}
	tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap)

	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{
		"account_id":    plan.AccountID.ValueInt64(),
		"disk_name":     plan.DiskName.ValueString(),
		"size_max":      plan.SizeMax.ValueInt64(),
		"gid":           plan.GID.ValueInt64(),
		"createTimeout": createTimeout})

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap)
	resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap)

	// Make create request and get response
	createReq := utilities.CreateRequestResourceDisk(ctx, &plan)
	tflog.Info(ctx, "Create resourceDisk: before call CloudBroker().Disks().Create", map[string]any{"req": createReq})
	diskId, err := r.client.CloudBroker().Disks().Create(ctx, createReq)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDisk: unable to Create Disk",
			err.Error(),
		)
		return
	}
	plan.Id = types.StringValue(strconv.Itoa(int(diskId)))
	tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()})

	// Additional settings after disk creation: in case of failures, warnings are added to resp.Diagnostics,
	// because an additional settings failure is not critical. If errors were added instead of warnings, the
	// terraform framework would mark the resource as tainted and delete it, which would be unwanted behaviour.

	if !plan.NodeIDs.IsUnknown() {
		resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, diskId, nil, &plan, true, r.client)...)
	}

	if !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...)
	}

	if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true
		resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...)
	}

	tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Read refreshes the Terraform state with the latest data.
func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":     state.Id.ValueString(),
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// read status
	resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error reading disk status")
		return
	}

	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error flatten disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDisk")
}

// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()})

	// Retrieve values from state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"updateTimeout": updateTimeout})

	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()

	// Checking if inputs are valid
	tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()})
	resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()})

	diskId, err := strconv.Atoi(state.Id.ValueString())
	if err != nil {
		resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error())
		return
	}

	// resize disk
	if !plan.SizeMax.Equal(state.SizeMax) {
		resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error resizing disk")
			return
		}
	}

	// rename disk
	if !plan.DiskName.Equal(state.DiskName) {
		resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error renaming disk")
			return
		}
	}

	// change io limits
	if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk")
			return
		}
	}

	// share/unshare disk
	if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() {
		resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare")
			return
		}
	}

	// update nodeIDs
	if !plan.NodeIDs.Equal(state.NodeIDs) && !plan.NodeIDs.IsUnknown() {
		resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, uint64(diskId), &state, &plan, false, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error with update nodeIDs")
			return
		}
	}

	tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"deleteTimeout": deleteTimeout})

	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()

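	// Detach and Permanently default to true; they are only switched off when
	// explicitly set to false in state.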
	detach := true
	permanently := true

	if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
		detach = false
	}

	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
		permanently = false
	}
	// Delete existing disk
	delReq := disks.DeleteRequest{
		DiskID:      uint64(state.DiskId.ValueInt64()),
		Detach:      detach,      // default true
		Permanently: permanently, // default true
	}

	tflog.Info(ctx, "Delete resourceDisk: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
	_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
	if err != nil {
		resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk", err.Error())
		return
	}

	tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": state.Id.ValueString()})
}

// Schema defines the schema for the resource.
func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDisk(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
		},
	}
}

// Metadata returns the resource type name.
func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk"
}

// Configure adds the provider configured client to the resource.
func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure resourceDisk")
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure resourceDisk successfully")
}

func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
@@ -0,0 +1,333 @@
package cbDisks

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &resourceDiskReplication{}
	_ resource.ResourceWithImportState = &resourceDiskReplication{}
)

// NewResourceDiskReplications is a helper function to simplify the provider implementation.
func NewResourceDiskReplications() resource.Resource {
	return &resourceDiskReplication{}
}

// resourceDiskReplication is the resource implementation.
type resourceDiskReplication struct {
	client *decort.DecortClient
}

// Create creates the resource and sets the initial Terraform state.
func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk replication
	var plan models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan")
		return
	}

	tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully")
	tflog.Info(ctx, "Create resourceDiskReplication: start creating")

	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully")

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDiskReplication: starting input checks")
	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: input checks successful")

	reqCreate := disks.ReplicateRequest{
		DiskID:   uint64(plan.DiskId.ValueInt64()),
		Name:     plan.Name.ValueString(),
		SepID:    uint64(plan.SepID.ValueInt64()),
		PoolName: plan.PoolName.ValueString(),
	}

	diskReplicaId, err := r.client.CloudBroker().Disks().Replicate(ctx, reqCreate)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDiskReplication: unable to replicate disk",
			err.Error(),
		)
		return
	}

	tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64()))

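	// Replication starts automatically after Replicate; if "start" is explicitly
	// set to false in the plan, stop it right away.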
	start := plan.Start.ValueBool()
	ok := !(plan.Start.IsNull() || plan.Start.IsUnknown())

	if ok && !start {
		tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId))
		reqStop := disks.ReplicationStopRequest{
			DiskID: uint64(plan.DiskId.ValueInt64()),
		}
		_, err = r.client.CloudBroker().Disks().ReplicationStop(ctx, reqStop)
		if err != nil {
			resp.Diagnostics.AddError(
				fmt.Sprintf("resourceDiskReplicationCreate: cannot stop replication between disk with ID: %d and replica with ID: %d", uint64(plan.DiskId.ValueInt64()), diskReplicaId),
				err.Error(),
			)
			return
		}
	}

// Map response body to schema and populate Computed attribute values
|
||||
resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
// Set state to fully populated data
|
||||
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
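A note on the Start handling above: replication is stopped only when start is explicitly false, while null and unknown values fall through to the default. A small helper along these lines (hypothetical, not part of this commit; assumes the terraform-plugin-framework types package) would make that pattern reusable:

// boolOrDefault is a hypothetical helper collapsing the
// IsNull/IsUnknown/ValueBool checks used above into one call.
func boolOrDefault(v types.Bool, def bool) bool {
    if v.IsNull() || v.IsUnknown() {
        return def
    }
    return v.ValueBool()
}

// With it, the stop condition above would read: if !boolOrDefault(plan.Start, true) { ... }
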
// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
    // Get current state
    var state models.ResourceRecordDiskReplicationModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)

    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskReplication: Error get state")
        return
    }
    tflog.Info(ctx, "Read resourceDiskReplication: got state successfully")

    // Set timeouts
    readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskReplication: Error set timeout")
        return
    }
    tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{
        "disk_id":     state.DiskId.ValueInt64(),
        "readTimeout": readTimeout})

    ctx, cancel := context.WithTimeout(ctx, readTimeout)
    defer cancel()

    // read status
    resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status")
        return
    }

    // Overwrite items with refreshed state
    resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk")
        return
    }

    // Set refreshed state
    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskReplication: Error set state")
        return
    }
    tflog.Info(ctx, "End read resourceDiskReplication")
}

// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
    // Retrieve values from plan
    var plan models.ResourceRecordDiskReplicationModel
    resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan")
        return
    }
    tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()})

    // Retrieve values from state
    var state models.ResourceRecordDiskReplicationModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state")
        return
    }
    tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})

    // Set timeouts
    updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error set timeout")
        return
    }
    tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{
        "disk_id":       state.DiskId.ValueInt64(),
        "updateTimeout": updateTimeout})

    ctx, cancel := context.WithTimeout(ctx, updateTimeout)
    defer cancel()

    // Checking if inputs are valid
    tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
    resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error input checks")
        return
    }
    tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()})

    if !plan.Start.Equal(state.Start) {
        resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...)
        if resp.Diagnostics.HasError() {
            tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop")
            return
        }
    }

    if !plan.Pause.Equal(state.Pause) {
        resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...)
        if resp.Diagnostics.HasError() {
            tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause")
            return
        }
    }

    if !plan.Reverse.Equal(state.Reverse) {
        resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...)
        if resp.Diagnostics.HasError() {
            tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse")
            return
        }
    }

    tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()})

    // Overwrite items with refreshed state
    resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error flatten disk")
        return
    }

    // Set refreshed state
    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskReplication: Error set state")
        return
    }
    tflog.Info(ctx, "End update resourceDiskReplication")
}

// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
    // Get current state
    var state models.ResourceRecordDiskReplicationModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Delete resourceDiskReplication: Error get state")
        return
    }
    tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})

    // Set timeouts
    deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Delete resourceDiskReplication: Error set timeout")
        return
    }
    tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{
        "disk_id":       state.DiskId.ValueInt64(),
        "deleteTimeout": deleteTimeout})

    ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
    defer cancel()

    detach := true
    permanently := true

    if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
        detach = false
    }

    if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
        permanently = false
    }

    // Delete existing disk replica
    delReq := disks.DeleteRequest{
        DiskID:      uint64(state.DiskId.ValueInt64()),
        Detach:      detach,      // default true
        Permanently: permanently, // default true
    }

    tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
    _, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
    if err != nil {
        resp.Diagnostics.AddError("Delete resourceDiskReplication: Error deleting disk", err.Error())
        return
    }

    tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()})
}

// Schema defines the schema for the resource.
func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
    resp.Schema = schema.Schema{
        Attributes: schemas.MakeSchemaResourceDiskReplication(),
        Blocks: map[string]schema.Block{
            "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
        },
    }
}

// Metadata returns the resource type name.
func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
    resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
}

// Configure adds the provider configured client to the resource.
func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
    tflog.Info(ctx, "Start Configure resourceDiskReplication")
    r.client = client.Resource(ctx, &req, resp)
    tflog.Info(ctx, "Configure resourceDiskReplication successful")
}

func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
    // Retrieve import ID and save to id attribute
    resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
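
For orientation, constructors such as NewResourceDiskReplication and NewResourceDiskSnapshot are typically registered in the provider's Resources list; the sketch below assumes a provider type named dynamixProvider, which is not taken from this commit:

// Hypothetical wiring; only the method shape comes from
// terraform-plugin-framework, the provider type is assumed.
func (p *dynamixProvider) Resources(ctx context.Context) []func() resource.Resource {
    return []func() resource.Resource{
        cbDisks.NewResourceDiskReplication,
        cbDisks.NewResourceDiskSnapshot,
    }
}
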
295
internal/service/cloudbroker/disks/resource_cb_disk_snapshot.go
Normal file
@@ -0,0 +1,295 @@
package cbDisks

import (
    "context"
    "time"

    "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
    "github.com/hashicorp/terraform-plugin-framework/path"
    "github.com/hashicorp/terraform-plugin-framework/resource"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// Ensure the implementation satisfies the expected interfaces.
var (
    _ resource.Resource                = &resourceDiskSnapshot{}
    _ resource.ResourceWithImportState = &resourceDiskSnapshot{}
)

// NewResourceDiskSnapshot is a helper function to simplify the provider implementation.
func NewResourceDiskSnapshot() resource.Resource {
    return &resourceDiskSnapshot{}
}

// resourceDiskSnapshot is the resource implementation.
type resourceDiskSnapshot struct {
    client *decort.DecortClient
}

// Create creates the resource and sets the initial Terraform state.
func (r *resourceDiskSnapshot) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
    // Get plan to create disk snapshot
    var plan models.ResourceDiskSnapshotModel
    resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Create resourceDiskSnapshot: Error receiving the plan")
        return
    }

    ctxCreateSnpsht := map[string]any{
        "disk_id": plan.DiskID.ValueInt64(),
        "label":   plan.Label.ValueString(),
    }
    tflog.Info(ctx, "Create resourceDiskSnapshot: got plan successfully", ctxCreateSnpsht)
    tflog.Info(ctx, "Create resourceDiskSnapshot: start creating", ctxCreateSnpsht)

    // Set timeouts
    createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Create resourceDiskSnapshot: Error set timeout")
        return
    }
    tflog.Info(ctx, "Create resourceDiskSnapshot: set timeouts successfully", map[string]any{
        "disk_id":       plan.DiskID.ValueInt64(),
        "label":         plan.Label.ValueString(),
        "createTimeout": createTimeout})

    ctx, cancel := context.WithTimeout(ctx, createTimeout)
    defer cancel()

    // Check if input values are valid in the platform
    _, diags = utilities.DiskSnapshotCheckPresence(ctx, &plan, r.client)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Create resourceDiskSnapshot: disk snapshot does not exist")
        return
    }
    tflog.Info(ctx, "Create resourceDiskSnapshot: snapshot successfully loaded", ctxCreateSnpsht)

    if plan.Rollback.ValueBool() { // default is false
        resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
        if resp.Diagnostics.HasError() {
            tflog.Error(ctx, "Create resourceDiskSnapshot: Error rollback snapshot")
            return
        }
    }

    tflog.Info(ctx, "Create resourceDiskSnapshot: resource creation is completed", ctxCreateSnpsht)

    // Map response body to schema and populate Computed attribute values
    resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
    if resp.Diagnostics.HasError() {
        return
    }

    // Set data last update
    plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

    // Set state to fully populated data
    resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
    if resp.Diagnostics.HasError() {
        return
    }
}

// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskSnapshot) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
    // Get current state
    var state models.ResourceDiskSnapshotModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskSnapshot: Error get state")
        return
    }
    tflog.Info(ctx, "Read resourceDiskSnapshot: got state successfully", map[string]any{
        "disk_id": state.DiskID.ValueInt64(),
        "label":   state.Label.ValueString(),
    })

    // Set timeouts
    readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskSnapshot: Error set timeout")
        return
    }
    tflog.Info(ctx, "Read resourceDiskSnapshot: set timeouts successfully", map[string]any{
        "disk_id":     state.DiskID.ValueInt64(),
        "label":       state.Label.ValueString(),
        "readTimeout": readTimeout})

    ctx, cancel := context.WithTimeout(ctx, readTimeout)
    defer cancel()

    // Overwrite items with refreshed state
    resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &state, r.client)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskSnapshot: Error flatten disk snapshot")
        return
    }

    // Set refreshed state
    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Read resourceDiskSnapshot: Error set state")
        return
    }
    tflog.Info(ctx, "End read resourceDiskSnapshot")
}

// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskSnapshot) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
    // Retrieve values from plan
    var plan models.ResourceDiskSnapshotModel
    resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the plan")
        return
    }

    ctxSnpsht := map[string]any{
        "disk_id": plan.DiskID.ValueInt64(),
        "label":   plan.Label.ValueString(),
    }
    tflog.Info(ctx, "Update resourceDiskSnapshot: got plan successfully", ctxSnpsht)

    // Retrieve values from state
    var state models.ResourceDiskSnapshotModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the state")
        return
    }
    tflog.Info(ctx, "Update resourceDiskSnapshot: got state successfully", ctxSnpsht)

    // Set timeouts
    updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Update resourceDiskSnapshot: Error set timeout")
        return
    }
    tflog.Info(ctx, "Update resourceDiskSnapshot: set timeouts successfully", map[string]any{
        "disk_id":       plan.DiskID.ValueInt64(),
        "label":         plan.Label.ValueString(),
        "updateTimeout": updateTimeout})

    ctx, cancel := context.WithTimeout(ctx, updateTimeout)
    defer cancel()

    if !plan.Rollback.Equal(state.Rollback) && plan.Rollback.ValueBool() {
        resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
        if resp.Diagnostics.HasError() {
            tflog.Error(ctx, "Update resourceDiskSnapshot: Error rollback snapshot")
            return
        }
    }

    tflog.Info(ctx, "Update resourceDiskSnapshot: disk snapshot update is completed", ctxSnpsht)

    // Map response body to schema and populate Computed attribute values
    resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
    if resp.Diagnostics.HasError() {
        return
    }

    // Set data last update
    plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

    // Set state to fully populated data
    resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
    if resp.Diagnostics.HasError() {
        return
    }
}

// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskSnapshot) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
    // Get current state
    var state models.ResourceDiskSnapshotModel
    resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Delete resourceDiskSnapshot: Error get state")
        return
    }
    tflog.Info(ctx, "Delete resourceDiskSnapshot: got state successfully", map[string]any{
        "disk_id": state.DiskID.ValueInt64(),
        "label":   state.Label.ValueString()})

    // Set timeouts
    deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Delete resourceDiskSnapshot: Error set timeout")
        return
    }
    tflog.Info(ctx, "Delete resourceDiskSnapshot: set timeouts successfully", map[string]any{
        "disk_id":       state.DiskID.ValueInt64(),
        "label":         state.Label.ValueString(),
        "deleteTimeout": deleteTimeout})

    ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
    defer cancel()

    // Check if input values are valid in the platform
    _, diags = utilities.DiskSnapshotCheckPresence(ctx, &state, r.client)
    resp.Diagnostics.Append(diags...)
    if resp.Diagnostics.HasError() {
        tflog.Error(ctx, "Delete resourceDiskSnapshot: disk snapshot does not exist")
        return
    }

    delReq := disks.SnapshotDeleteRequest{
        DiskID: uint64(state.DiskID.ValueInt64()),
        Label:  state.Label.ValueString(),
    }

    tflog.Info(ctx, "Delete resourceDiskSnapshot: before call CloudBroker().Disks().SnapshotDelete", map[string]any{"req": delReq})
    _, err := r.client.CloudBroker().Disks().SnapshotDelete(ctx, delReq)
    if err != nil {
        resp.Diagnostics.AddError("Delete resourceDiskSnapshot: Error deleting disk snapshot", err.Error())
        return
    }

    tflog.Info(ctx, "End delete resourceDiskSnapshot", map[string]any{
        "disk_id": state.DiskID.ValueInt64(),
        "label":   state.Label.ValueString()})
}

// Schema defines the schema for the resource.
func (r *resourceDiskSnapshot) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
    resp.Schema = schema.Schema{
        Attributes: schemas.MakeSchemaResourceDiskSnapshot(),
        Blocks: map[string]schema.Block{
            "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
        },
    }
}

// Metadata returns the resource type name.
func (r *resourceDiskSnapshot) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
    resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot"
}

// Configure adds the provider configured client to the resource.
func (r *resourceDiskSnapshot) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
    tflog.Info(ctx, "Start Configure resourceDiskSnapshot")
    r.client = client.Resource(ctx, &req, resp)
    tflog.Info(ctx, "Configure resourceDiskSnapshot successful")
}

func (r *resourceDiskSnapshot) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
    // Retrieve import ID and save to id attribute
    resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
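
The rollback guard in Update above fires only on a false-to-true transition of the rollback flag. A throwaway table test (hypothetical, not part of this commit; assumes the terraform-plugin-framework types package) pins that behavior down:

// Hypothetical test, not part of this commit.
func TestRollbackTrigger(t *testing.T) {
    cases := []struct {
        plan, state types.Bool
        want        bool
    }{
        {types.BoolValue(true), types.BoolValue(false), true},  // false -> true: roll back
        {types.BoolValue(true), types.BoolValue(true), false},  // unchanged: no-op
        {types.BoolValue(false), types.BoolValue(true), false}, // true -> false: no-op
    }
    for _, c := range cases {
        got := !c.plan.Equal(c.state) && c.plan.ValueBool()
        if got != c.want {
            t.Errorf("plan=%v state=%v: got %v, want %v", c.plan, c.state, got, c.want)
        }
    }
}
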
@@ -0,0 +1,253 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDisk() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        "disk_id":        schema.Int64Attribute{Required: true},
        "account_id":     schema.Int64Attribute{Computed: true},
        "account_name":   schema.StringAttribute{Computed: true},
        "acl":            schema.StringAttribute{Computed: true},
        "boot_partition": schema.Int64Attribute{Computed: true},
        "computes": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "compute_id":   schema.StringAttribute{Computed: true},
                    "compute_name": schema.StringAttribute{Computed: true},
                },
            },
        },
        "created_time":     schema.Int64Attribute{Computed: true},
        "deleted_time":     schema.Int64Attribute{Computed: true},
        "desc":             schema.StringAttribute{Computed: true},
        "destruction_time": schema.Int64Attribute{Computed: true},
        "devicename":       schema.StringAttribute{Computed: true},
        "disk_path":        schema.StringAttribute{Computed: true},
        "gid":              schema.Int64Attribute{Computed: true},
        "guid":             schema.Int64Attribute{Computed: true},
        "image_id":         schema.Int64Attribute{Computed: true},
        "images": schema.ListAttribute{
            Computed:    true,
            ElementType: types.StringType,
        },
        "iotune": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "read_bytes_sec":      schema.Int64Attribute{Computed: true},
                "read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
                "read_iops_sec":       schema.Int64Attribute{Computed: true},
                "read_iops_sec_max":   schema.Int64Attribute{Computed: true},
                "size_iops_sec":       schema.Int64Attribute{Computed: true},
                "total_bytes_sec":     schema.Int64Attribute{Computed: true},
                "total_bytes_sec_max": schema.Int64Attribute{Computed: true},
                "total_iops_sec":      schema.Int64Attribute{Computed: true},
                "total_iops_sec_max":  schema.Int64Attribute{Computed: true},
                "write_bytes_sec":     schema.Int64Attribute{Computed: true},
                "write_bytes_sec_max": schema.Int64Attribute{Computed: true},
                "write_iops_sec":      schema.Int64Attribute{Computed: true},
                "write_iops_sec_max":  schema.Int64Attribute{Computed: true},
            },
        },
        "iqn":          schema.StringAttribute{Computed: true},
        "login":        schema.StringAttribute{Computed: true},
        "machine_id":   schema.Int64Attribute{Computed: true},
        "machine_name": schema.StringAttribute{Computed: true},
        "milestones":   schema.Int64Attribute{Computed: true},
        "disk_name":    schema.StringAttribute{Computed: true},
        "order":        schema.Int64Attribute{Computed: true},
        "params":       schema.StringAttribute{Computed: true},
        "parent_id":    schema.Int64Attribute{Computed: true},
        "passwd":       schema.StringAttribute{Computed: true},
        "pci_slot":     schema.Int64Attribute{Computed: true},
        "pool":         schema.StringAttribute{Computed: true},
        "present_to": schema.ListAttribute{
            Computed:    true,
            ElementType: types.Int64Type,
        },
        "purge_attempts": schema.Int64Attribute{Computed: true},
        "purge_time":     schema.Int64Attribute{Computed: true},
        "replication": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "disk_id":        schema.Int64Attribute{Computed: true},
                "pool_id":        schema.StringAttribute{Computed: true},
                "role":           schema.StringAttribute{Computed: true},
                "self_volume_id": schema.StringAttribute{Computed: true},
                "storage_id":     schema.StringAttribute{Computed: true},
                "volume_id":      schema.StringAttribute{Computed: true},
            },
        },
        "reality_device_number": schema.Int64Attribute{Computed: true},
        "reference_id":          schema.StringAttribute{Computed: true},
        "res_id":                schema.StringAttribute{Computed: true},
        "res_name":              schema.StringAttribute{Computed: true},
        "role":                  schema.StringAttribute{Computed: true},
        "sep_id":                schema.Int64Attribute{Computed: true},
        "sep_type":              schema.StringAttribute{Computed: true},
        "shareable":             schema.BoolAttribute{Computed: true},
        "size_max":              schema.Int64Attribute{Computed: true},
        "size_used":             schema.Float64Attribute{Computed: true},
        "snapshots": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "guid":          schema.StringAttribute{Computed: true},
                    "label":         schema.StringAttribute{Computed: true},
                    "reference_id":  schema.StringAttribute{Computed: true},
                    "res_id":        schema.StringAttribute{Computed: true},
                    "snap_set_guid": schema.StringAttribute{Computed: true},
                    "snap_set_time": schema.Int64Attribute{Computed: true},
                    "timestamp":     schema.Int64Attribute{Computed: true},
                },
            },
        },
        "status":      schema.StringAttribute{Computed: true},
        "tech_status": schema.StringAttribute{Computed: true},
        "type":        schema.StringAttribute{Computed: true},
        "vmid":        schema.Int64Attribute{Computed: true},
    }
}
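
One property worth checking for an attribute map this large is that every key is a valid Terraform identifier (lowercase letters, digits, underscores). A throwaway test (hypothetical, not part of this commit):

// Hypothetical test; uses only the standard regexp and testing packages.
func TestDiskAttributeNamesAreValid(t *testing.T) {
    re := regexp.MustCompile(`^[a-z0-9_]+$`)
    for name := range MakeSchemaDataSourceDisk() {
        if !re.MatchString(name) {
            t.Errorf("attribute %q is not a valid Terraform identifier", name)
        }
    }
}
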
@@ -0,0 +1,293 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskList() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        "by_id":         schema.Int64Attribute{Optional: true},
        "name":          schema.StringAttribute{Optional: true},
        "account_name":  schema.StringAttribute{Optional: true},
        "disk_max_size": schema.Int64Attribute{Optional: true},
        "shared":        schema.BoolAttribute{Optional: true},
        "account_id":    schema.Int64Attribute{Optional: true},
        "type":          schema.StringAttribute{Optional: true},
        "sort_by":       schema.StringAttribute{Optional: true},
        "page":          schema.Int64Attribute{Optional: true},
        "size":          schema.Int64Attribute{Optional: true},
        "items": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "account_id":     schema.Int64Attribute{Computed: true},
                    "account_name":   schema.StringAttribute{Computed: true},
                    "acl":            schema.StringAttribute{Computed: true},
                    "boot_partition": schema.Int64Attribute{Computed: true},
                    "computes": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "compute_id":   schema.StringAttribute{Computed: true},
                                "compute_name": schema.StringAttribute{Computed: true},
                            },
                        },
                    },
                    "created_time":     schema.Int64Attribute{Computed: true},
                    "deleted_time":     schema.Int64Attribute{Computed: true},
                    "desc":             schema.StringAttribute{Computed: true},
                    "destruction_time": schema.Int64Attribute{Computed: true},
                    "devicename":       schema.StringAttribute{Computed: true},
                    "disk_path":        schema.StringAttribute{Computed: true},
                    "gid":              schema.Int64Attribute{Computed: true},
                    "guid":             schema.Int64Attribute{Computed: true},
                    "disk_id":          schema.Int64Attribute{Computed: true},
                    "image_id":         schema.Int64Attribute{Computed: true},
                    "images": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.StringType,
                    },
                    "iotune": schema.SingleNestedAttribute{
                        Computed: true,
                        Attributes: map[string]schema.Attribute{
                            "read_bytes_sec":      schema.Int64Attribute{Computed: true},
                            "read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
                            "read_iops_sec":       schema.Int64Attribute{Computed: true},
                            "read_iops_sec_max":   schema.Int64Attribute{Computed: true},
                            "size_iops_sec":       schema.Int64Attribute{Computed: true},
                            "total_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "total_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "total_iops_sec":      schema.Int64Attribute{Computed: true},
                            "total_iops_sec_max":  schema.Int64Attribute{Computed: true},
                            "write_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "write_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "write_iops_sec":      schema.Int64Attribute{Computed: true},
                            "write_iops_sec_max":  schema.Int64Attribute{Computed: true},
                        },
                    },
                    "iqn":          schema.StringAttribute{Computed: true},
                    "login":        schema.StringAttribute{Computed: true},
                    "machine_id":   schema.Int64Attribute{Computed: true},
                    "machine_name": schema.StringAttribute{Computed: true},
                    "milestones":   schema.Int64Attribute{Computed: true},
                    "disk_name":    schema.StringAttribute{Computed: true},
                    "order":        schema.Int64Attribute{Computed: true},
                    "params":       schema.StringAttribute{Computed: true},
                    "parent_id":    schema.Int64Attribute{Computed: true},
                    "passwd":       schema.StringAttribute{Computed: true},
                    "pci_slot":     schema.Int64Attribute{Computed: true},
                    "pool":         schema.StringAttribute{Computed: true},
                    "present_to": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.Int64Type,
                    },
                    "purge_attempts": schema.Int64Attribute{Computed: true},
                    "purge_time":     schema.Int64Attribute{Computed: true},
                    "replication": schema.SingleNestedAttribute{
                        Computed: true,
                        Attributes: map[string]schema.Attribute{
                            "disk_id":        schema.Int64Attribute{Computed: true},
                            "pool_id":        schema.StringAttribute{Computed: true},
                            "role":           schema.StringAttribute{Computed: true},
                            "self_volume_id": schema.StringAttribute{Computed: true},
                            "storage_id":     schema.StringAttribute{Computed: true},
                            "volume_id":      schema.StringAttribute{Computed: true},
                        },
                    },
                    "reality_device_number": schema.Int64Attribute{Computed: true},
                    "reference_id":          schema.StringAttribute{Computed: true},
                    "res_id":                schema.StringAttribute{Computed: true},
                    "res_name":              schema.StringAttribute{Computed: true},
                    "role":                  schema.StringAttribute{Computed: true},
                    "sep_id":                schema.Int64Attribute{Computed: true},
                    "sep_type":              schema.StringAttribute{Computed: true},
                    "shareable":             schema.BoolAttribute{Computed: true},
                    "size_max":              schema.Int64Attribute{Computed: true},
                    "size_used":             schema.Float64Attribute{Computed: true},
                    "snapshots": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "guid":          schema.StringAttribute{Computed: true},
                                "label":         schema.StringAttribute{Computed: true},
                                "reference_id":  schema.StringAttribute{Computed: true},
                                "res_id":        schema.StringAttribute{Computed: true},
                                "snap_set_guid": schema.StringAttribute{Computed: true},
                                "snap_set_time": schema.Int64Attribute{Computed: true},
                                "timestamp":     schema.Int64Attribute{Computed: true},
                            },
                        },
                    },
                    "status":      schema.StringAttribute{Computed: true},
                    "tech_status": schema.StringAttribute{Computed: true},
                    "type":        schema.StringAttribute{Computed: true},
                    "vmid":        schema.Int64Attribute{Computed: true},
                },
            },
        },
        "entry_count": schema.Int64Attribute{Computed: true},
    }
}
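
The iotune attribute map is repeated verbatim in each schema constructor in this package. A possible refactor (hypothetical, not part of this commit) builds it once:

// Hypothetical helper producing the iotune block used across these schemas.
func makeIOTuneAttribute() schema.Attribute {
    names := []string{
        "read_bytes_sec", "read_bytes_sec_max", "read_iops_sec", "read_iops_sec_max",
        "size_iops_sec", "total_bytes_sec", "total_bytes_sec_max", "total_iops_sec",
        "total_iops_sec_max", "write_bytes_sec", "write_bytes_sec_max", "write_iops_sec",
        "write_iops_sec_max",
    }
    attrs := make(map[string]schema.Attribute, len(names))
    for _, n := range names {
        attrs[n] = schema.Int64Attribute{Computed: true}
    }
    return schema.SingleNestedAttribute{Computed: true, Attributes: attrs}
}
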
@@ -0,0 +1,293 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskListDeleted() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        "by_id":         schema.Int64Attribute{Optional: true},
        "name":          schema.StringAttribute{Optional: true},
        "account_name":  schema.StringAttribute{Optional: true},
        "disk_max_size": schema.Int64Attribute{Optional: true},
        "shared":        schema.BoolAttribute{Optional: true},
        "account_id":    schema.Int64Attribute{Optional: true},
        "type":          schema.StringAttribute{Optional: true},
        "sort_by":       schema.StringAttribute{Optional: true},
        "page":          schema.Int64Attribute{Optional: true},
        "size":          schema.Int64Attribute{Optional: true},
        "items": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "account_id":     schema.Int64Attribute{Computed: true},
                    "account_name":   schema.StringAttribute{Computed: true},
                    "acl":            schema.StringAttribute{Computed: true},
                    "boot_partition": schema.Int64Attribute{Computed: true},
                    "computes": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "compute_id":   schema.StringAttribute{Computed: true},
                                "compute_name": schema.StringAttribute{Computed: true},
                            },
                        },
                    },
                    "created_time":     schema.Int64Attribute{Computed: true},
                    "deleted_time":     schema.Int64Attribute{Computed: true},
                    "desc":             schema.StringAttribute{Computed: true},
                    "destruction_time": schema.Int64Attribute{Computed: true},
                    "devicename":       schema.StringAttribute{Computed: true},
                    "disk_path":        schema.StringAttribute{Computed: true},
                    "gid":              schema.Int64Attribute{Computed: true},
                    "guid":             schema.Int64Attribute{Computed: true},
                    "disk_id":          schema.Int64Attribute{Computed: true},
                    "image_id":         schema.Int64Attribute{Computed: true},
                    "images": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.StringType,
                    },
                    "iotune": schema.SingleNestedAttribute{
                        Computed: true,
                        Attributes: map[string]schema.Attribute{
                            "read_bytes_sec":      schema.Int64Attribute{Computed: true},
                            "read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
                            "read_iops_sec":       schema.Int64Attribute{Computed: true},
                            "read_iops_sec_max":   schema.Int64Attribute{Computed: true},
                            "size_iops_sec":       schema.Int64Attribute{Computed: true},
                            "total_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "total_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "total_iops_sec":      schema.Int64Attribute{Computed: true},
                            "total_iops_sec_max":  schema.Int64Attribute{Computed: true},
                            "write_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "write_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "write_iops_sec":      schema.Int64Attribute{Computed: true},
                            "write_iops_sec_max":  schema.Int64Attribute{Computed: true},
                        },
                    },
                    "iqn":          schema.StringAttribute{Computed: true},
                    "login":        schema.StringAttribute{Computed: true},
                    "machine_id":   schema.Int64Attribute{Computed: true},
                    "machine_name": schema.StringAttribute{Computed: true},
                    "milestones":   schema.Int64Attribute{Computed: true},
                    "disk_name":    schema.StringAttribute{Computed: true},
                    "order":        schema.Int64Attribute{Computed: true},
                    "params":       schema.StringAttribute{Computed: true},
                    "parent_id":    schema.Int64Attribute{Computed: true},
                    "passwd":       schema.StringAttribute{Computed: true},
                    "pci_slot":     schema.Int64Attribute{Computed: true},
                    "pool":         schema.StringAttribute{Computed: true},
                    "present_to": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.Int64Type,
                    },
                    "purge_attempts": schema.Int64Attribute{Computed: true},
                    "purge_time":     schema.Int64Attribute{Computed: true},
                    "replication": schema.SingleNestedAttribute{
                        Computed: true,
                        Attributes: map[string]schema.Attribute{
                            "disk_id":        schema.Int64Attribute{Computed: true},
                            "pool_id":        schema.StringAttribute{Computed: true},
                            "role":           schema.StringAttribute{Computed: true},
                            "self_volume_id": schema.StringAttribute{Computed: true},
                            "storage_id":     schema.StringAttribute{Computed: true},
                            "volume_id":      schema.StringAttribute{Computed: true},
                        },
                    },
                    "reality_device_number": schema.Int64Attribute{Computed: true},
                    "reference_id":          schema.StringAttribute{Computed: true},
                    "res_id":                schema.StringAttribute{Computed: true},
                    "res_name":              schema.StringAttribute{Computed: true},
                    "role":                  schema.StringAttribute{Computed: true},
                    "sep_id":                schema.Int64Attribute{Computed: true},
                    "sep_type":              schema.StringAttribute{Computed: true},
                    "shareable":             schema.BoolAttribute{Computed: true},
                    "size_max":              schema.Int64Attribute{Computed: true},
                    "size_used":             schema.Float64Attribute{Computed: true},
                    "snapshots": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "guid":          schema.StringAttribute{Computed: true},
                                "label":         schema.StringAttribute{Computed: true},
                                "reference_id":  schema.StringAttribute{Computed: true},
                                "res_id":        schema.StringAttribute{Computed: true},
                                "snap_set_guid": schema.StringAttribute{Computed: true},
                                "snap_set_time": schema.Int64Attribute{Computed: true},
                                "timestamp":     schema.Int64Attribute{Computed: true},
                            },
                        },
                    },
                    "status":      schema.StringAttribute{Computed: true},
                    "tech_status": schema.StringAttribute{Computed: true},
                    "type":        schema.StringAttribute{Computed: true},
                    "vmid":        schema.Int64Attribute{Computed: true},
                },
            },
        },
        "entry_count": schema.Int64Attribute{Computed: true},
    }
}
@@ -0,0 +1,36 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskListTypes() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // optional attributes
        "sort_by": schema.StringAttribute{
            Optional:    true,
            Description: "Sort by one of the supported fields, format +|-(field)",
        },
        "page": schema.Int64Attribute{
            Optional:    true,
            Description: "Page number",
        },
        "size": schema.Int64Attribute{
            Optional:    true,
            Description: "Page size",
        },

        // computed attributes
        "id": schema.StringAttribute{Computed: true},
        "items": schema.ListAttribute{
            Computed:    true,
            ElementType: types.StringType,
        },
        "entry_count": schema.Int64Attribute{Computed: true},
    }
}
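
For orientation, a schema like this implies a model along the following lines; the real struct lives in the models package and may differ (hypothetical sketch, assumes the terraform-plugin-framework types package):

// Hypothetical model mirroring the tfsdk tags of the schema above.
type ListTypesModel struct {
    SortBy     types.String `tfsdk:"sort_by"`
    Page       types.Int64  `tfsdk:"page"`
    Size       types.Int64  `tfsdk:"size"`
    Id         types.String `tfsdk:"id"`
    Items      types.List   `tfsdk:"items"`
    EntryCount types.Int64  `tfsdk:"entry_count"`
}
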
@@ -0,0 +1,62 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskListTypesDetailed() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // optional attributes
        "sort_by": schema.StringAttribute{
            Optional:    true,
            Description: "Sort by one of the supported fields, format +|-(field)",
        },
        "page": schema.Int64Attribute{
            Optional:    true,
            Description: "Page number",
        },
        "size": schema.Int64Attribute{
            Optional:    true,
            Description: "Page size",
        },

        // computed attributes
        "id": schema.StringAttribute{Computed: true},
        "items": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "pools": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "name":   schema.StringAttribute{Computed: true},
                                "system": schema.StringAttribute{Computed: true},
                                "types": schema.ListAttribute{
                                    Computed:    true,
                                    ElementType: types.StringType,
                                },
                            },
                        },
                    },
                    "sep_id":   schema.Int64Attribute{Computed: true},
                    "sep_name": schema.StringAttribute{Computed: true},
                },
            },
        },
        "entry_count": schema.Int64Attribute{Computed: true},
    }
}
@@ -0,0 +1,269 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskListUnattached() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // optional attributes
        "by_id": schema.Int64Attribute{
            Optional:    true,
            Description: "Find by ID",
        },
        "account_name": schema.StringAttribute{
            Optional:    true,
            Description: "Find by account name",
        },
        "disk_max_size": schema.Int64Attribute{
            Optional:    true,
            Description: "Find by max disk size",
        },
        "status": schema.StringAttribute{
            Optional:    true,
            Description: "Find by status",
        },
        "account_id": schema.Int64Attribute{
            Optional:    true,
            Description: "ID of the account the disks belong to",
        },
        "sep_id": schema.Int64Attribute{
            Optional:    true,
            Description: "Find by SEP ID",
        },
        "pool_name": schema.StringAttribute{
            Optional:    true,
            Description: "Find by pool name",
        },
        "type": schema.StringAttribute{
            Optional:    true,
            Description: "Type of the disks",
        },
        "sort_by": schema.StringAttribute{
            Optional:    true,
            Description: "Sort by one of the supported fields, format +|-(field)",
        },
        "page": schema.Int64Attribute{
            Optional:    true,
            Description: "Page number",
        },
        "size": schema.Int64Attribute{
            Optional:    true,
            Description: "Page size",
        },

        // computed attributes
        "id": schema.StringAttribute{Computed: true},
        "items": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "ckey": schema.StringAttribute{Computed: true},
                    "meta": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.StringType,
                    },
                    "account_id":       schema.Int64Attribute{Computed: true},
                    "account_name":     schema.StringAttribute{Computed: true},
                    "acl":              schema.StringAttribute{Computed: true},
                    "boot_partition":   schema.Int64Attribute{Computed: true},
                    "created_time":     schema.Int64Attribute{Computed: true},
                    "deleted_time":     schema.Int64Attribute{Computed: true},
                    "desc":             schema.StringAttribute{Computed: true},
                    "destruction_time": schema.Int64Attribute{Computed: true},
                    "disk_path":        schema.StringAttribute{Computed: true},
                    "gid":              schema.Int64Attribute{Computed: true},
                    "guid":             schema.Int64Attribute{Computed: true},
                    "disk_id":          schema.Int64Attribute{Computed: true},
                    "image_id":         schema.Int64Attribute{Computed: true},
                    "images": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.StringType,
                    },
                    "iotune": schema.SingleNestedAttribute{
                        Computed: true,
                        Attributes: map[string]schema.Attribute{
                            "read_bytes_sec":      schema.Int64Attribute{Computed: true},
                            "read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
                            "read_iops_sec":       schema.Int64Attribute{Computed: true},
                            "read_iops_sec_max":   schema.Int64Attribute{Computed: true},
                            "size_iops_sec":       schema.Int64Attribute{Computed: true},
                            "total_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "total_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "total_iops_sec":      schema.Int64Attribute{Computed: true},
                            "total_iops_sec_max":  schema.Int64Attribute{Computed: true},
                            "write_bytes_sec":     schema.Int64Attribute{Computed: true},
                            "write_bytes_sec_max": schema.Int64Attribute{Computed: true},
                            "write_iops_sec":      schema.Int64Attribute{Computed: true},
                            "write_iops_sec_max":  schema.Int64Attribute{Computed: true},
                        },
                    },
                    "iqn":        schema.StringAttribute{Computed: true},
                    "login":      schema.StringAttribute{Computed: true},
                    "milestones": schema.Int64Attribute{Computed: true},
                    "disk_name":  schema.StringAttribute{Computed: true},
                    "order":      schema.Int64Attribute{Computed: true},
                    "params":     schema.StringAttribute{Computed: true},
                    "parent_id":  schema.Int64Attribute{Computed: true},
                    "passwd":     schema.StringAttribute{Computed: true},
                    "pci_slot":   schema.Int64Attribute{Computed: true},
                    "pool":       schema.StringAttribute{Computed: true},
                    "present_to": schema.ListAttribute{
                        Computed:    true,
                        ElementType: types.Int64Type,
                    },
                    "purge_attempts":        schema.Int64Attribute{Computed: true},
                    "purge_time":            schema.Int64Attribute{Computed: true},
                    "reality_device_number": schema.Int64Attribute{Computed: true},
                    "reference_id":          schema.StringAttribute{Computed: true},
                    "res_id":                schema.StringAttribute{Computed: true},
                    "res_name":              schema.StringAttribute{Computed: true},
                    "role":                  schema.StringAttribute{Computed: true},
                    "sep_id":                schema.Int64Attribute{Computed: true},
                    "shareable":             schema.BoolAttribute{Computed: true},
                    "size_max":              schema.Int64Attribute{Computed: true},
                    "size_used":             schema.Float64Attribute{Computed: true},
                    "snapshots": schema.ListNestedAttribute{
                        Computed: true,
                        NestedObject: schema.NestedAttributeObject{
                            Attributes: map[string]schema.Attribute{
                                "guid":          schema.StringAttribute{Computed: true},
                                "label":         schema.StringAttribute{Computed: true},
                                "res_id":        schema.StringAttribute{Computed: true},
                                "snap_set_guid": schema.StringAttribute{Computed: true},
                                "snap_set_time": schema.Int64Attribute{Computed: true},
                                "timestamp":     schema.Int64Attribute{Computed: true},
                            },
                        },
                    },
                    "status":      schema.StringAttribute{Computed: true},
                    "tech_status": schema.StringAttribute{Computed: true},
                    "type":        schema.StringAttribute{Computed: true},
                    "vmid":        schema.Int64Attribute{Computed: true},
                },
            },
        },
        "entry_count": schema.Int64Attribute{Computed: true},
    }
}
@@ -0,0 +1,251 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaDataSourceDiskReplication() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        "disk_id": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the primary disk",
        },
        "status_replication": schema.StringAttribute{
            Computed:    true,
            Description: "Status of replication",
        },
        "account_id": schema.Int64Attribute{
            Computed:    true,
            Description: "The unique ID of the subscriber-owner of the disk",
        },
        "account_name": schema.StringAttribute{
            Computed:    true,
            Description: "The name of the subscriber ('account') to whom this disk belongs",
        },
        "acl":            schema.StringAttribute{Computed: true},
        "boot_partition": schema.Int64Attribute{Computed: true},
        "computes": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "compute_id":   schema.StringAttribute{Computed: true},
                    "compute_name": schema.StringAttribute{Computed: true},
                },
            },
        },
        "created_time":     schema.Int64Attribute{Computed: true},
        "deleted_time":     schema.Int64Attribute{Computed: true},
        "desc":             schema.StringAttribute{Computed: true},
        "destruction_time": schema.Int64Attribute{Computed: true},
        "devicename":       schema.StringAttribute{Computed: true},
        "disk_path":        schema.StringAttribute{Computed: true},
        "gid":              schema.Int64Attribute{Computed: true},
        "guid":             schema.Int64Attribute{Computed: true},
        "image_id":         schema.Int64Attribute{Computed: true},
        "images": schema.ListAttribute{
            Computed:    true,
            ElementType: types.StringType,
        },
        "iotune": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "read_bytes_sec":      schema.Int64Attribute{Computed: true},
                "read_bytes_sec_max":  schema.Int64Attribute{Computed: true},
                "read_iops_sec":       schema.Int64Attribute{Computed: true},
                "read_iops_sec_max":   schema.Int64Attribute{Computed: true},
                "size_iops_sec":       schema.Int64Attribute{Computed: true},
                "total_bytes_sec":     schema.Int64Attribute{Computed: true},
                "total_bytes_sec_max": schema.Int64Attribute{Computed: true},
                "total_iops_sec":      schema.Int64Attribute{Computed: true},
                "total_iops_sec_max":  schema.Int64Attribute{Computed: true},
                "write_bytes_sec":     schema.Int64Attribute{Computed: true},
                "write_bytes_sec_max": schema.Int64Attribute{Computed: true},
                "write_iops_sec":      schema.Int64Attribute{Computed: true},
                "write_iops_sec_max":  schema.Int64Attribute{Computed: true},
            },
        },
        "iqn":        schema.StringAttribute{Computed: true},
        "login":      schema.StringAttribute{Computed: true},
        "milestones": schema.Int64Attribute{Computed: true},
        "disk_name":  schema.StringAttribute{Computed: true},
        "order":      schema.Int64Attribute{Computed: true},
        "params":     schema.StringAttribute{Computed: true},
        "parent_id":  schema.Int64Attribute{Computed: true},
        "passwd":     schema.StringAttribute{Computed: true},
        "pci_slot":   schema.Int64Attribute{Computed: true},
        "pool":       schema.StringAttribute{Computed: true},
        "present_to": schema.ListAttribute{
            Computed:    true,
            ElementType: types.Int64Type,
        },
        "purge_attempts": schema.Int64Attribute{Computed: true},
        "purge_time":     schema.Int64Attribute{Computed: true},
        "replication": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "disk_id":        schema.Int64Attribute{Computed: true},
                "pool_id":        schema.StringAttribute{Computed: true},
                "role":           schema.StringAttribute{Computed: true},
                "self_volume_id": schema.StringAttribute{Computed: true},
                "storage_id":     schema.StringAttribute{Computed: true},
                "volume_id":      schema.StringAttribute{Computed: true},
            },
        },
        "reality_device_number": schema.Int64Attribute{Computed: true},
        "reference_id":          schema.StringAttribute{Computed: true},
        "res_id":                schema.StringAttribute{Computed: true},
        "res_name":              schema.StringAttribute{Computed: true},
        "role":                  schema.StringAttribute{Computed: true},
        "sep_id":                schema.Int64Attribute{Computed: true},
        "sep_type": schema.StringAttribute{
||||
Computed: true,
|
||||
},
|
||||
"shareable": schema.BoolAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"size_max": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"size_used": schema.Float64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"snapshots": schema.ListNestedAttribute{
|
||||
Computed: true,
|
||||
NestedObject: schema.NestedAttributeObject{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"label": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"res_id": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"snap_set_guid": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"snap_set_time": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
"timestamp": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"type": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"vmid": schema.Int64Attribute{
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
@@ -0,0 +1,39 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceDiskSnapshot() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // required attributes
        "disk_id": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the disk",
        },
        "label": schema.StringAttribute{
            Required:    true,
            Description: "Name of the snapshot",
        },

        // computed attributes
        "id": schema.StringAttribute{
            Computed: true,
        },
        "guid": schema.StringAttribute{
            Computed: true,
        },
        "res_id": schema.StringAttribute{
            Computed: true,
        },
        "snap_set_guid": schema.StringAttribute{
            Computed: true,
        },
        "snap_set_time": schema.Int64Attribute{
            Computed: true,
        },
        "timestamp": schema.Int64Attribute{
            Computed: true,
        },
    }
}
@@ -0,0 +1,48 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)

func MakeSchemaDataSourceDiskSnapshotList() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // required attributes
        "disk_id": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the disk",
        },

        // computed attributes
        "id": schema.StringAttribute{
            Computed: true,
        },
        "items": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "label": schema.StringAttribute{
                        Computed: true,
                    },
                    "reference_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "res_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_time": schema.Int64Attribute{
                        Computed: true,
                    },
                    "timestamp": schema.Int64Attribute{
                        Computed: true,
                    },
                },
            },
        },
    }
}
@@ -0,0 +1,303 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
    "github.com/hashicorp/terraform-plugin-framework/schema/validator"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaResourceDisk() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // required attributes
        "account_id": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the account",
        },
        "disk_name": schema.StringAttribute{
            Required:    true,
            Description: "Name of disk",
        },
        "size_max": schema.Int64Attribute{
            Required:    true,
            Description: "size in GB",
        },
        "gid": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the grid (platform)",
        },

        // optional attributes
        "node_ids": schema.ListAttribute{
            ElementType: types.Int64Type,
            Optional:    true,
            Description: "IDs of the nodes the disk should be presented to",
        },
        "ssd_size": schema.Int64Attribute{
            Optional:    true,
            Description: "SSD size in GB",
        },
        "desc": schema.StringAttribute{
            Optional:    true,
            Computed:    true,
            Description: "description of disk",
        },
        "pool": schema.StringAttribute{
            Optional:    true,
            Computed:    true,
            Description: "Pool for disk location",
        },
        "sep_id": schema.Int64Attribute{
            Optional:    true,
            Computed:    true,
            Description: "Storage endpoint provider ID to create disk",
        },
        "type": schema.StringAttribute{
            Optional: true,
            Computed: true,
            Validators: []validator.String{
                stringvalidator.OneOf("B", "D", "T"), // case is not ignored
            },
            Description: "(B;D;T) B=Boot;D=Data;T=Temp",
            // default is D
        },
        "detach": schema.BoolAttribute{
            Optional:    true,
            Description: "Detaching the disk from compute",
            // default is false
        },
        "permanently": schema.BoolAttribute{
            Optional:    true,
            Description: "Whether to completely delete the disk; works only with non-attached disks",
            // default is false
        },
        "shareable": schema.BoolAttribute{
            Optional:    true,
            Computed:    true,
            Description: "share disk",
        },
        "iotune": schema.SingleNestedAttribute{
            Optional: true,
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "read_bytes_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Number of bytes to read per second",
                },
                "read_bytes_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum number of bytes to read",
                },
                "read_iops_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Number of io read operations per second",
                },
                "read_iops_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum number of io read operations",
                },
                "size_iops_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Size of io operations",
                },
                "total_bytes_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Total size bytes per second",
                },
                "total_bytes_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum total size of bytes per second",
                },
                "total_iops_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Total number of io operations per second",
                },
                "total_iops_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum total number of io operations per second",
                },
                "write_bytes_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Number of bytes to write per second",
                },
                "write_bytes_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum number of bytes to write per second",
                },
                "write_iops_sec": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Number of write operations per second",
                },
                "write_iops_sec_max": schema.Int64Attribute{
                    Optional:    true,
                    Computed:    true,
                    Description: "Maximum number of write operations per second",
                },
            },
        },

        // computed attributes
        "id": schema.StringAttribute{
            Computed: true,
            PlanModifiers: []planmodifier.String{
                stringplanmodifier.UseStateForUnknown(),
            },
        },
        "disk_id": schema.Int64Attribute{
            Computed: true,
        },
        "account_name": schema.StringAttribute{
            Computed: true,
        },
        "acl": schema.StringAttribute{
            Computed: true,
        },
        "boot_partition": schema.Int64Attribute{
            Computed: true,
        },
        "computes": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "compute_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "compute_name": schema.StringAttribute{
                        Computed: true,
                    },
                },
            },
        },
        "created_time": schema.Int64Attribute{
            Computed: true,
        },
        "deleted_time": schema.Int64Attribute{
            Computed: true,
        },
        "destruction_time": schema.Int64Attribute{
            Computed: true,
        },
        "devicename": schema.StringAttribute{
            Computed: true,
        },
        "disk_path": schema.StringAttribute{
            Computed: true,
        },
        "guid": schema.Int64Attribute{
            Computed: true,
        },
        "image_id": schema.Int64Attribute{
            Computed: true,
        },
        "images": schema.ListAttribute{
            Computed:    true,
            ElementType: types.StringType,
        },
        "last_updated": schema.StringAttribute{
            Computed:    true,
            Description: "Timestamp of the last Terraform update of the disk resource.",
        },
        "iqn": schema.StringAttribute{
            Computed: true,
        },
        "login": schema.StringAttribute{
            Computed: true,
        },
        "milestones": schema.Int64Attribute{
            Computed: true,
        },
        "order": schema.Int64Attribute{
            Computed: true,
        },
        "params": schema.StringAttribute{
            Computed: true,
        },
        "parent_id": schema.Int64Attribute{
            Computed: true,
        },
        "passwd": schema.StringAttribute{
            Computed: true,
        },
        "pci_slot": schema.Int64Attribute{
            Computed: true,
        },
        "purge_attempts": schema.Int64Attribute{
            Computed: true,
        },
        "present_to": schema.ListAttribute{
            Computed:    true,
            ElementType: types.Int64Type,
        },
        "purge_time": schema.Int64Attribute{
            Computed: true,
        },
        "reality_device_number": schema.Int64Attribute{
            Computed: true,
        },
        "reference_id": schema.StringAttribute{
            Computed: true,
        },
        "res_id": schema.StringAttribute{
            Computed: true,
        },
        "res_name": schema.StringAttribute{
            Computed: true,
        },
        "role": schema.StringAttribute{
            Computed: true,
        },
        "sep_type": schema.StringAttribute{
            Computed: true,
        },
        "size_used": schema.Float64Attribute{
            Computed: true,
        },
        "snapshots": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "label": schema.StringAttribute{
                        Computed: true,
                    },
                    "res_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_time": schema.Int64Attribute{
                        Computed: true,
                    },
                    "timestamp": schema.Int64Attribute{
                        Computed: true,
                    },
                },
            },
        },
        "status": schema.StringAttribute{
            Computed: true,
        },
        "tech_status": schema.StringAttribute{
            Computed: true,
        },
        "vmid": schema.Int64Attribute{
            Computed: true,
        },
    }
}
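
A large attribute map like this is easy to break during refactors. Below is a minimal sanity-test sketch (hypothetical, not part of this commit; it assumes only the MakeSchemaResourceDisk function above and the framework's IsRequired accessor on schema attributes):

package schemas

import "testing"

func TestMakeSchemaResourceDisk_Required(t *testing.T) {
    attrs := MakeSchemaResourceDisk()
    // account_id, disk_name, size_max and gid are declared Required above.
    for _, name := range []string{"account_id", "disk_name", "size_max", "gid"} {
        a, ok := attrs[name]
        if !ok {
            t.Fatalf("attribute %q missing from schema", name)
        }
        if !a.IsRequired() {
            t.Errorf("attribute %q should be Required", name)
        }
    }
}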
@@ -0,0 +1,281 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/resource/schema"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
    "github.com/hashicorp/terraform-plugin-framework/types"
)

func MakeSchemaResourceDiskReplication() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // required attributes
        "disk_id": schema.Int64Attribute{
            Required: true,
        },
        "disk_name": schema.StringAttribute{
            Required: true,
        },
        "sep_id": schema.Int64Attribute{
            Required: true,
        },
        "pool_name": schema.StringAttribute{
            Required: true,
        },

        // optional attributes
        "pause": schema.BoolAttribute{
            Optional: true,
        },
        "reverse": schema.BoolAttribute{
            Optional: true,
        },
        "start": schema.BoolAttribute{
            Optional: true,
        },
        "detach": schema.BoolAttribute{
            Optional: true,
        },
        "permanently": schema.BoolAttribute{
            Optional: true,
        },

        // computed attributes
        "id": schema.StringAttribute{
            Computed: true,
            PlanModifiers: []planmodifier.String{
                stringplanmodifier.UseStateForUnknown(),
            },
        },
        "replica_disk_id": schema.Int64Attribute{
            Computed: true,
        },
        "status_replication": schema.StringAttribute{
            Computed: true,
        },
        "account_id": schema.Int64Attribute{
            Computed: true,
        },
        "account_name": schema.StringAttribute{
            Computed: true,
        },
        "acl": schema.StringAttribute{
            Computed: true,
        },
        "boot_partition": schema.Int64Attribute{
            Computed: true,
        },
        "computes": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "compute_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "compute_name": schema.StringAttribute{
                        Computed: true,
                    },
                },
            },
        },
        "created_time": schema.Int64Attribute{
            Computed: true,
        },
        "deleted_time": schema.Int64Attribute{
            Computed: true,
        },
        "desc": schema.StringAttribute{
            Computed: true,
        },
        "destruction_time": schema.Int64Attribute{
            Computed: true,
        },
        "devicename": schema.StringAttribute{
            Computed: true,
        },
        "disk_path": schema.StringAttribute{
            Computed: true,
        },
        "guid": schema.Int64Attribute{
            Computed: true,
        },
        "gid": schema.Int64Attribute{
            Computed: true,
        },
        "image_id": schema.Int64Attribute{
            Computed: true,
        },
        "images": schema.ListAttribute{
            Computed:    true,
            ElementType: types.StringType,
        },
        "iotune": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "read_bytes_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "read_bytes_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
                "read_iops_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "read_iops_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
                "size_iops_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "total_bytes_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "total_bytes_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
                "total_iops_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "total_iops_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
                "write_bytes_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "write_bytes_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
                "write_iops_sec": schema.Int64Attribute{
                    Computed: true,
                },
                "write_iops_sec_max": schema.Int64Attribute{
                    Computed: true,
                },
            },
        },
        "iqn": schema.StringAttribute{
            Computed: true,
        },
        "login": schema.StringAttribute{
            Computed: true,
        },
        "milestones": schema.Int64Attribute{
            Computed: true,
        },
        "order": schema.Int64Attribute{
            Computed: true,
        },
        "params": schema.StringAttribute{
            Computed: true,
        },
        "parent_id": schema.Int64Attribute{
            Computed: true,
        },
        "passwd": schema.StringAttribute{
            Computed: true,
        },
        "pci_slot": schema.Int64Attribute{
            Computed: true,
        },
        "pool": schema.StringAttribute{
            Computed: true,
        },
        "present_to": schema.ListAttribute{
            Computed:    true,
            ElementType: types.Int64Type,
        },
        "purge_time": schema.Int64Attribute{
            Computed: true,
        },
        "purge_attempts": schema.Int64Attribute{
            Computed: true,
        },
        "replication": schema.SingleNestedAttribute{
            Computed: true,
            Attributes: map[string]schema.Attribute{
                "disk_id": schema.Int64Attribute{
                    Computed: true,
                },
                "pool_id": schema.StringAttribute{
                    Computed: true,
                },
                "role": schema.StringAttribute{
                    Computed: true,
                },
                "self_volume_id": schema.StringAttribute{
                    Computed: true,
                },
                "storage_id": schema.StringAttribute{
                    Computed: true,
                },
                "volume_id": schema.StringAttribute{
                    Computed: true,
                },
            },
        },
        "reality_device_number": schema.Int64Attribute{
            Computed: true,
        },
        "res_id": schema.StringAttribute{
            Computed: true,
        },
        "reference_id": schema.StringAttribute{
            Computed: true,
        },
        "res_name": schema.StringAttribute{
            Computed: true,
        },
        "role": schema.StringAttribute{
            Computed: true,
        },
        "sep_type": schema.StringAttribute{
            Computed: true,
        },
        "shareable": schema.BoolAttribute{
            Computed: true,
        },
        "size_max": schema.Int64Attribute{
            Computed: true,
        },
        "size_used": schema.Float64Attribute{
            Computed: true,
        },
        "snapshots": schema.ListNestedAttribute{
            Computed: true,
            NestedObject: schema.NestedAttributeObject{
                Attributes: map[string]schema.Attribute{
                    "guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "label": schema.StringAttribute{
                        Computed: true,
                    },
                    "res_id": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_guid": schema.StringAttribute{
                        Computed: true,
                    },
                    "snap_set_time": schema.Int64Attribute{
                        Computed: true,
                    },
                    "timestamp": schema.Int64Attribute{
                        Computed: true,
                    },
                },
            },
        },
        "status": schema.StringAttribute{
            Computed: true,
        },
        "tech_status": schema.StringAttribute{
            Computed: true,
        },
        "type": schema.StringAttribute{
            Computed: true,
        },
        "vmid": schema.Int64Attribute{
            Computed: true,
        },
    }
}
@@ -0,0 +1,58 @@
package schemas

import (
    "github.com/hashicorp/terraform-plugin-framework/resource/schema"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
    "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)

func MakeSchemaResourceDiskSnapshot() map[string]schema.Attribute {
    return map[string]schema.Attribute{
        // required attributes
        "disk_id": schema.Int64Attribute{
            Required:    true,
            Description: "ID of the disk",
        },
        "label": schema.StringAttribute{
            Required:    true,
            Description: "Name of the snapshot",
        },

        // optional attributes
        "rollback": schema.BoolAttribute{
            Optional:    true,
            Description: "Needed in order to make a snapshot rollback",
            // default is false
        },
        "timestamp": schema.Int64Attribute{
            Optional:    true,
            Computed:    true,
            Description: "Snapshot time",
        },

        // computed attributes
        "id": schema.StringAttribute{
            Computed: true,
            PlanModifiers: []planmodifier.String{
                stringplanmodifier.UseStateForUnknown(),
            },
            // id is generated as "<disk_id>#<label>"
        },
        "last_updated": schema.StringAttribute{
            Computed:    true,
            Description: "Timestamp of the last Terraform update of the disk snapshot resource.",
        },
        "guid": schema.StringAttribute{
            Computed: true,
        },
        "res_id": schema.StringAttribute{
            Computed: true,
        },
        "snap_set_guid": schema.StringAttribute{
            Computed: true,
        },
        "snap_set_time": schema.Int64Attribute{
            Computed: true,
        },
    }
}
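
Since the resource id is generated as "<disk_id>#<label>" (see the comment above), reading it back requires splitting on the first '#'. A hedged sketch of such a helper (hypothetical name, not part of this commit; uses only "strings", "strconv" and "fmt" from the standard library):

func parseDiskSnapshotID(id string) (uint64, string, error) {
    // split into at most two parts so a '#' inside the label survives
    parts := strings.SplitN(id, "#", 2)
    if len(parts) != 2 {
        return 0, "", fmt.Errorf("expected id in form \"<disk_id>#<label>\", got %q", id)
    }
    diskID, err := strconv.ParseUint(parts[0], 10, 64)
    if err != nil {
        return 0, "", fmt.Errorf("cannot parse disk_id from id %q: %w", id, err)
    }
    return diskID, parts[1], nil
}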
@@ -0,0 +1,30 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskCheckPresence(ctx context.Context, state *models.DiskModel, c *decort.DecortClient) (*disks.RecordDisk, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listReq := disks.GetRequest{DiskID: uint64(state.DiskID.ValueInt64())}

    tflog.Info(ctx, "DataSourceDiskCheckPresence: before call CloudBroker().Disks().Get", map[string]any{
        "req": listReq,
    })

    disk, err := c.CloudBroker().Disks().Get(ctx, listReq)
    if err != nil {
        diags.AddError("Cannot get info about disk", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskCheckPresence: got disk successfully")
    return disk, nil
}
@@ -0,0 +1,63 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskListCheckPresence(ctx context.Context, state *models.DisksModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listReq := disks.ListRequest{}

    if !state.ByID.IsNull() {
        listReq.ByID = uint64(state.ByID.ValueInt64())
    }
    if !state.Name.IsNull() {
        listReq.Name = state.Name.ValueString()
    }
    if !state.AccountName.IsNull() {
        listReq.AccountName = state.AccountName.ValueString()
    }
    if !state.DiskMaxSize.IsNull() {
        listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
    }
    if !state.Shared.IsNull() {
        listReq.Shared = state.Shared.ValueBool()
    }
    if !state.AccountID.IsNull() {
        listReq.AccountID = uint64(state.AccountID.ValueInt64())
    }
    if !state.Type.IsNull() {
        listReq.Type = state.Type.ValueString()
    }
    if !state.SortBy.IsNull() {
        listReq.SortBy = state.SortBy.ValueString()
    }
    if !state.Page.IsNull() {
        listReq.Page = uint64(state.Page.ValueInt64())
    }
    if !state.Size.IsNull() {
        listReq.Size = uint64(state.Size.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskListCheckPresence: before call CloudBroker().Disks().List", map[string]any{
        "req": listReq,
    })

    diskList, err := c.CloudBroker().Disks().List(ctx, listReq)
    if err != nil {
        diags.AddError("Cannot get info about disk list", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskListCheckPresence: got list successfully", map[string]any{
        "entry_count": diskList.EntryCount,
    })
    return diskList, nil
}
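
Each filter above is copied into the request only when the state value is non-null; an unconditional copy would send Go zero values (0, "", false) as real filters and silently narrow the listing. The same guard, reduced to one hypothetical helper for illustration (not part of this commit):

func applyUint64Filter(src types.Int64, dst *uint64) {
    // copy the value only if the user actually set it
    if !src.IsNull() && !src.IsUnknown() {
        *dst = uint64(src.ValueInt64())
    }
}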
@@ -0,0 +1,63 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskListDeletedCheckPresence(ctx context.Context, state *models.ListDisksModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listReq := disks.ListDeletedRequest{}

    if !state.ByID.IsNull() {
        listReq.ByID = uint64(state.ByID.ValueInt64())
    }
    if !state.Name.IsNull() {
        listReq.Name = state.Name.ValueString()
    }
    if !state.AccountName.IsNull() {
        listReq.AccountName = state.AccountName.ValueString()
    }
    if !state.DiskMaxSize.IsNull() {
        listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
    }
    if !state.Shared.IsNull() {
        listReq.Shared = state.Shared.ValueBool()
    }
    if !state.AccountID.IsNull() {
        listReq.AccountID = uint64(state.AccountID.ValueInt64())
    }
    if !state.Type.IsNull() {
        listReq.Type = state.Type.ValueString()
    }
    if !state.SortBy.IsNull() {
        listReq.SortBy = state.SortBy.ValueString()
    }
    if !state.Page.IsNull() {
        listReq.Page = uint64(state.Page.ValueInt64())
    }
    if !state.Size.IsNull() {
        listReq.Size = uint64(state.Size.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: before call CloudBroker().Disks().ListDeleted", map[string]any{
        "req": listReq,
    })

    diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, listReq)
    if err != nil {
        diags.AddError("Cannot get info about disk list", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: got list successfully", map[string]any{
        "entry_count": diskList.EntryCount,
    })
    return diskList, nil
}
@@ -0,0 +1,42 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskListTypesCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listTypesReq := disks.ListTypesRequest{Detailed: false}

    if !state.SortBy.IsNull() {
        listTypesReq.SortBy = state.SortBy.ValueString()
    }
    if !state.Page.IsNull() {
        listTypesReq.Page = uint64(state.Page.ValueInt64())
    }
    if !state.Size.IsNull() {
        listTypesReq.Size = uint64(state.Size.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: before call CloudBroker().Disks().ListTypes", map[string]any{
        "req": listTypesReq,
    })

    listTypes, err := c.CloudBroker().Disks().ListTypes(ctx, listTypesReq)
    if err != nil {
        diags.AddError("Cannot get info about disk list types", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: got list successfully", map[string]any{
        "entry_count": listTypes.EntryCount,
    })
    return listTypes, nil
}
@@ -0,0 +1,42 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskListTypesDetailedCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listTypesReq := disks.ListTypesRequest{Detailed: true}

    if !state.SortBy.IsNull() {
        listTypesReq.SortBy = state.SortBy.ValueString()
    }
    if !state.Page.IsNull() {
        listTypesReq.Page = uint64(state.Page.ValueInt64())
    }
    if !state.Size.IsNull() {
        listTypesReq.Size = uint64(state.Size.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: before call CloudBroker().Disks().ListTypes", map[string]any{
        "req": listTypesReq,
    })

    listTypes, err := c.CloudBroker().Disks().ListTypes(ctx, listTypesReq)
    if err != nil {
        diags.AddError("Cannot get info about disk list types", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: got list successfully", map[string]any{
        "entry_count": listTypes.EntryCount,
    })
    return listTypes, nil
}
@@ -0,0 +1,66 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskListUnattachedCheckPresence(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) (*disks.ListUnattachedDisks, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    listReq := disks.ListUnattachedRequest{}

    if !state.ByID.IsNull() {
        listReq.ByID = uint64(state.ByID.ValueInt64())
    }
    if !state.AccountName.IsNull() {
        listReq.AccountName = state.AccountName.ValueString()
    }
    if !state.DiskMaxSize.IsNull() {
        listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
    }
    if !state.Status.IsNull() {
        listReq.Status = state.Status.ValueString()
    }
    if !state.AccountID.IsNull() {
        listReq.AccountID = uint64(state.AccountID.ValueInt64())
    }
    if !state.SepID.IsNull() {
        listReq.SEPID = uint64(state.SepID.ValueInt64())
    }
    if !state.PoolName.IsNull() {
        listReq.Pool = state.PoolName.ValueString()
    }
    if !state.Type.IsNull() {
        listReq.Type = state.Type.ValueString()
    }
    if !state.SortBy.IsNull() {
        listReq.SortBy = state.SortBy.ValueString()
    }
    if !state.Page.IsNull() {
        listReq.Page = uint64(state.Page.ValueInt64())
    }
    if !state.Size.IsNull() {
        listReq.Size = uint64(state.Size.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: before call CloudBroker().Disks().ListUnattached", map[string]any{
        "req": listReq,
    })

    diskList, err := c.CloudBroker().Disks().ListUnattached(ctx, listReq)
    if err != nil {
        diags.AddError("Cannot get info about disk list", err.Error())
        return nil, diags
    }

    tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: got list successfully", map[string]any{
        "entry_count": diskList.EntryCount,
    })
    return diskList, nil
}
@@ -0,0 +1,32 @@
package utilities

import (
    "context"

    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskReplicationCheckPresence(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
    status, err := c.CloudBroker().Disks().ReplicationStatus(ctx, disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())})
    if err != nil {
        return nil, nil, err
    }

    req := disks.GetRequest{}

    if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
        req.DiskID = uint64(state.DiskId.ValueInt64())
    }

    tflog.Info(ctx, "DataSourceDiskReplicationCheckPresence: load disk")
    disk, err := c.CloudBroker().Disks().Get(ctx, req)
    if err != nil {
        return nil, nil, err
    }

    return disk, &status, nil
}
@@ -0,0 +1,43 @@
package utilities

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func DataSourceDiskSnapshotCheckPresence(ctx context.Context, plan *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
    diags := diag.Diagnostics{}

    diskId := uint64(plan.DiskID.ValueInt64())
    label := plan.Label.ValueString()
    tflog.Info(ctx, "Start DataSourceDiskSnapshotCheckPresence", map[string]any{"disk_id": diskId, "label": label})

    tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: before call CloudBroker().Disks().Get", map[string]any{"disk_id": diskId})
    disk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
    if err != nil {
        diags.AddError(
            fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
            err.Error(),
        )
        return nil, diags
    }
    tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: response from CloudBroker().Disks().Get", map[string]any{"response": disk})

    for _, sn := range disk.Snapshots {
        if label == sn.Label {
            return &sn, nil
        }
    }

    diags.AddError(
        "Snapshot not found",
        fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
    )
    return nil, diags
}
@@ -0,0 +1,25 @@
package utilities

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
)

func DiskSnapshotListCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.ListSnapshots, error) {
    tflog.Info(ctx, fmt.Sprintf("DiskSnapshotListCheckPresence: Get info about disk snapshot list with disk ID - %v", diskId))

    recordDisk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
    if err != nil {
        return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
    }

    tflog.Info(ctx, "DiskSnapshotListCheckPresence: response from CloudBroker().Disks().Get", map[string]any{
        "disk_id":  diskId,
        "response": recordDisk})

    return &recordDisk.Snapshots, nil
}
@@ -0,0 +1,456 @@
package utilities

import (
    "context"
    "fmt"
    "strconv"
    "time"

    "github.com/hashicorp/terraform-plugin-framework/diag"
    "github.com/hashicorp/terraform-plugin-framework/types"
    "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
    "github.com/hashicorp/terraform-plugin-log/tflog"
    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)

// DiskCheckPresence checks if disk with diskId exists
func DiskCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.RecordDisk, error) {
    tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskId))

    diskRecord, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
    if err != nil {
        return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
    }

    tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudBroker().Disks().Get", map[string]any{"disk_id": diskId, "response": diskRecord})

    return diskRecord, nil
}

// CreateRequestResourceDisk generates disk create request from plan
func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskModel) disks.CreateRequest {
    tflog.Info(ctx, "Start CreateRequestResourceDisk", map[string]any{
        "account_id": plan.AccountID.ValueInt64(),
        "disk_name":  plan.DiskName.ValueString(),
        "size_max":   plan.SizeMax.ValueInt64(),
        "gid":        plan.GID.ValueInt64(),
    })

    // set up required parameters in disk create request
    createReq := disks.CreateRequest{
        AccountID: uint64(plan.AccountID.ValueInt64()),
        Name:      plan.DiskName.ValueString(),
        Size:      uint64(plan.SizeMax.ValueInt64()),
        GID:       uint64(plan.GID.ValueInt64()),
    }

    if plan.Type.IsUnknown() {
        createReq.Type = "D" // default value
    } else {
        createReq.Type = plan.Type.ValueString()
    }
    if !plan.SEPID.IsUnknown() {
        createReq.SEPID = uint64(plan.SEPID.ValueInt64())
    }
    if !plan.Pool.IsUnknown() {
        createReq.Pool = plan.Pool.ValueString()
    }
    if !plan.Description.IsUnknown() {
        createReq.Description = plan.Description.ValueString()
    }
    if !plan.SSDSize.IsUnknown() {
        createReq.SSDSize = uint64(plan.SSDSize.ValueInt64())
    }

    return createReq
}
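
In the resource Create flow this request is handed straight to the SDK. A hedged sketch of the call site follows (assuming, consistent with the rest of this provider, that CloudBroker().Disks().Create returns the new disk ID; surrounding resource plumbing and the plan/resp variables are assumed context, not shown here):

    createReq := CreateRequestResourceDisk(ctx, &plan)
    diskId, err := c.CloudBroker().Disks().Create(ctx, createReq)
    if err != nil {
        resp.Diagnostics.AddError("Unable to create disk", err.Error())
        return
    }
    // store the new ID back into state as a string, matching the schema's "id"
    plan.Id = types.StringValue(strconv.FormatUint(diskId, 10))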

// LimitIOCreateDisk sets IO limits that user specified in iotune field for created resource.
// In case of failure returns warnings.
func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
    diags := diag.Diagnostics{}

    limitIOReq := disks.LimitIORequest{
        DiskID: diskId,
    }

    var iotunePlan models.ResourceIOTuneModel
    // plan.IOTune is not null as it was checked before call
    tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskId})
    diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
    if diags.HasError() {
        tflog.Error(ctx, "LimitIOCreateDisk: cannot populate iotune with plan.IOTune object element")
        return diags
    }

    limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())

    limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
    limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
    limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
    limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())

    limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
    limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
    limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
    limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
    limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())

    limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
    limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
    limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
    limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())

    tflog.Info(ctx, "LimitIOCreateDisk: before calling CloudBroker().Disks().LimitIO", map[string]any{
        "disk_id":    diskId,
        "limitIOReq": limitIOReq})
    res, err := c.CloudBroker().Disks().LimitIO(ctx, limitIOReq)
    if err != nil {
        diags.AddWarning("LimitIOCreateDisk: Unable to limit io for Disk",
            err.Error())
    }
    tflog.Info(ctx, "LimitIOCreateDisk: response from CloudBroker().Disks().LimitIO", map[string]any{
        "disk_id":  diskId,
        "response": res})

    return diags
}
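
Note that ValueInt64 on a null or unknown types.Int64 returns 0, so every iotune sub-field the user leaves unset is sent to LimitIO as 0; that the platform reads 0 as "no limit" is an API-side assumption this code relies on, not something the code enforces. A stricter variant would guard each field, e.g. (hypothetical sketch for one field):

    if !iotunePlan.ReadBytesSec.IsNull() {
        limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
    }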

// ShareableCreateDisk shares disk.
// In case of failure returns warnings.
func ShareableCreateDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
    diags := diag.Diagnostics{}

    tflog.Info(ctx, "ShareableCreateDisk: before calling CloudBroker().Disks().Share", map[string]any{"disk_id": diskId})
    res, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
    if err != nil {
        diags.AddWarning("ShareableCreateDisk: Unable to share Disk",
            err.Error())
    }
    tflog.Info(ctx, "ShareableCreateDisk: response from CloudBroker().Disks().Share", map[string]any{
        "disk_id":  diskId,
        "response": res})

    return diags
}

// DiskReadStatus loads the disk resource by its ID and checks its current status.
// Performs restore if needed for Deleted status.
// In case of failure returns errors.
func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
    tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.Id.ValueString()})

    diags := diag.Diagnostics{}

    diskId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
    if err != nil {
        diags.AddError("DiskReadStatus: Cannot parse disk ID from state", err.Error())
        return diags
    }

    recordDisk, err := DiskCheckPresence(ctx, diskId, c)
    if err != nil {
        diags.AddError("DiskReadStatus: Unable to Read Disk before status check", err.Error())
        return diags
    }

    // check resource status
    switch recordDisk.Status {
    case status.Modeled:
        diags.AddError(
            "Disk is in status Modeled",
            "please, contact support for more information",
        )
        return diags
    case status.Deleted:
        // attempt to restore disk
        tflog.Info(ctx, "DiskReadStatus: disk with status.Deleted is being read, attempt to restore it", map[string]any{
            "disk_id": recordDisk.ID,
            "status":  recordDisk.Status})
        diags.Append(RestoreDisk(ctx, diskId, c)...)
        if diags.HasError() {
            tflog.Error(ctx, "DiskReadStatus: cannot restore disk")
            return diags
        }
        tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskId})
        state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
    case status.Destroyed, status.Purged:
        diags.AddError(
            "DiskReadStatus: Disk is in status Destroyed or Purged",
            fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskId),
        )
        return diags
    }

    return nil
}

// RestoreDisk performs disk Restore request.
// Returns error in case of failures.
func RestoreDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
    diags := diag.Diagnostics{}

    restoreReq := disks.RestoreRequest{
        DiskID: diskId,
    }

    tflog.Info(ctx, "RestoreDisk: before calling CloudBroker().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})

    res, err := c.CloudBroker().Disks().Restore(ctx, restoreReq)
    if err != nil {
        diags.AddError(
            "RestoreDisk: cannot restore disk",
            err.Error(),
        )
        return diags
    }
    tflog.Info(ctx, "RestoreDisk: response from CloudBroker().Disks().Restore", map[string]any{"disk_id": diskId, "response": res})

    return nil
}

// SizeMaxUpdateDisk resizes disk.
// Returns error in case of failures.
func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
    var diags diag.Diagnostics

    resizeReq := disks.ResizeRequest{
        DiskID: diskId,
    }

    // check if resize request is valid
    if plan.SizeMax.ValueInt64() < state.SizeMax.ValueInt64() {
        diags.AddError(
            "SizeMaxUpdateDisk: reducing disk size is not allowed",
            fmt.Sprintf("disk with id %s has state size %d, plan size %d",
                plan.Id.ValueString(),
                state.SizeMax.ValueInt64(),
                plan.SizeMax.ValueInt64()))
        return diags
    }

    resizeReq.Size = uint64(plan.SizeMax.ValueInt64())

    tflog.Info(ctx, "SizeMaxUpdateDisk: before calling CloudBroker().Disks().Resize2", map[string]any{
        "disk_id":        plan.Id.ValueString(),
        "size_max_state": state.SizeMax.ValueInt64(),
        "size_max_plan":  plan.SizeMax.ValueInt64(),
        "req":            resizeReq,
    })

    res, err := c.CloudBroker().Disks().Resize2(ctx, resizeReq)
    if err != nil {
        diags.AddError("cannot resize disk", err.Error())
        return diags
    }

    tflog.Info(ctx, "SizeMaxUpdateDisk: response from CloudBroker().Disks().Resize2", map[string]any{
        "disk_id":  plan.Id.ValueString(),
        "response": res})

    return nil
}

// NameUpdateDisk renames disk.
// Returns error in case of failures.
func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
    var diags diag.Diagnostics

    renameReq := disks.RenameRequest{
        DiskID: diskId,
        Name:   plan.DiskName.ValueString(),
    }

    tflog.Info(ctx, "NameUpdateDisk: before calling CloudBroker().Disks().Rename", map[string]any{
        "disk_id":        plan.Id.ValueString(),
        "disk_name_plan": plan.DiskName.ValueString(),
        "req":            renameReq,
    })

    res, err := c.CloudBroker().Disks().Rename(ctx, renameReq)
    if err != nil {
        diags.AddError("NameUpdateDisk: cannot rename disk", err.Error())
        return diags
    }

    tflog.Info(ctx, "NameUpdateDisk: response from CloudBroker().Disks().Rename", map[string]any{
        "disk_id":  plan.Id.ValueString(),
        "response": res})

    return nil
}

// LimitIOUpdateDisk changes IO limits that user specified in iotune field for updated resource.
// In case of failure returns errors.
func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
    diags := diag.Diagnostics{}

    limitIOReq := disks.LimitIORequest{
        DiskID: diskId,
    }

    var iotunePlan models.ResourceIOTuneModel
    // plan.IOTune is not null as it was checked before call
    tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskId})
    diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
    if diags.HasError() {
        tflog.Error(ctx, "LimitIOUpdateDisk: cannot populate iotune with plan.IOTune object element")
        return diags
    }

    limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())

    limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
    limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
    limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
    limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())

    limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
    limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
    limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
    limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
    limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())

    limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
    limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
    limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
    limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())

    tflog.Info(ctx, "LimitIOUpdateDisk: before calling CloudBroker().Disks().LimitIO", map[string]any{
        "disk_id":    diskId,
        "limitIOReq": limitIOReq})
    res, err := c.CloudBroker().Disks().LimitIO(ctx, limitIOReq)
    if err != nil {
        diags.AddError("LimitIOUpdateDisk: Unable to limit io for Disk",
            err.Error())
        return diags
    }
    tflog.Info(ctx, "LimitIOUpdateDisk: response from CloudBroker().Disks().LimitIO", map[string]any{
        "disk_id":  diskId,
        "response": res})

    return nil
}

// ShareableUpdateDisk shares or unshares disk.
// In case of failure returns errors.
func ShareableUpdateDisk(ctx context.Context, diskId uint64, share bool, c *decort.DecortClient) diag.Diagnostics {
    diags := diag.Diagnostics{}

    if share {
        // share
        tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudBroker().Disks().Share", map[string]any{"disk_id": diskId})
        res, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
        if err != nil {
            diags.AddError("ShareableUpdateDisk: Unable to share Disk",
                err.Error())
            return diags
        }
        tflog.Info(ctx, "ShareableUpdateDisk: response from CloudBroker().Disks().Share", map[string]any{
            "disk_id":  diskId,
            "response": res})
    } else {
        // unshare
        tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudBroker().Disks().Unshare", map[string]any{"disk_id": diskId})
        res, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
        if err != nil {
            diags.AddError("ShareableUpdateDisk: Unable to unshare Disk",
                err.Error())
            return diags
        }
        tflog.Info(ctx, "ShareableUpdateDisk: response from CloudBroker().Disks().Unshare", map[string]any{
            "disk_id":  diskId,
            "response": res})
    }

    return nil
}
|
||||
|
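// Illustrative caller-side sketch (hypothetical plan model with a Shareable
// bool attribute; not part of this commit):
//
//	if !plan.Shareable.Equal(state.Shareable) {
//		resp.Diagnostics.Append(ShareableUpdateDisk(ctx, diskId, plan.Shareable.ValueBool(), c)...)
//		if resp.Diagnostics.HasError() {
//			return
//		}
//	}
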
// ResourceDiskChangeNodes presents the disk on newly added nodes and
// depresents it from removed ones. Failures are reported as warnings.
func ResourceDiskChangeNodes(ctx context.Context, diskID uint64, state, plan *models.ResourceDiskModel, afterCreate bool, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	presentIDs := make([]uint64, 0)

	newNodes := make([]uint64, 0, len(plan.NodeIDs.Elements()))
	diagsI := plan.NodeIDs.ElementsAs(ctx, &newNodes, true)
	if diagsI.HasError() {
		tflog.Error(ctx, "resourceDiskChangeNodes: cannot populate newNodes with plan.NodeIDs list elements")
		diags.AddWarning("resourceDiskChangeNodes: cannot populate newNodes with plan.NodeIDs list elements",
			fmt.Sprintf("%v", diagsI))
		return diags
	}

	oldNodes := make([]uint64, 0)

	if afterCreate {
		presentIDs = newNodes
	} else {
		diagsI = state.NodeIDs.ElementsAs(ctx, &oldNodes, true)
		if diagsI.HasError() {
			tflog.Error(ctx, "resourceDiskChangeNodes: cannot populate oldNodes with state.NodeIDs list elements")
			diags.AddWarning("resourceDiskChangeNodes: cannot populate oldNodes with state.NodeIDs list elements",
				fmt.Sprintf("%v", diagsI))
			return diags
		}
		presentIDs = difference(newNodes, oldNodes)
	}

	for _, presentID := range presentIDs {
		req := disks.PresentRequest{
			DiskID: diskID,
			NodeID: presentID,
		}

		_, err := c.CloudBroker().Disks().Present(ctx, req)
		if err != nil {
			diags.AddWarning("resourceDiskChangeNodes: Unable to present disk to node",
				err.Error())
		}
	}

	if afterCreate {
		return diags
	}

	depresentIDs := difference(oldNodes, newNodes)
	if len(depresentIDs) > 0 {
		for _, nodeID := range depresentIDs {
			req := disks.DepresentRequest{
				DiskID: diskID,
				NodeID: nodeID,
			}

			_, err := c.CloudBroker().Disks().Depresent(ctx, req)
			if err != nil {
				diags.AddWarning("resourceDiskChangeNodes: Unable to depresent disk from node",
					err.Error())
			}
		}
	}

	return diags
}

// difference returns the elements of set that are not present in check.
func difference(set, check []uint64) []uint64 {
	mapCheck := make(map[uint64]struct{})
	for _, id := range check {
		mapCheck[id] = struct{}{}
	}
	var diff []uint64
	for _, id := range set {
		if _, ok := mapCheck[id]; !ok {
			diff = append(diff, id)
		}
	}
	return diff
}

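// Worked example of how difference drives the present/depresent split in
// ResourceDiskChangeNodes (hypothetical values, not part of this commit):
//
//	plan.NodeIDs  = [1, 2, 3]
//	state.NodeIDs = [2, 3, 4]
//
//	difference([1 2 3], [2 3 4]) == [1]   // nodes to present the disk on
//	difference([2 3 4], [1 2 3]) == [4]   // nodes to depresent the disk from
//
// so node 1 receives a PresentRequest and node 4 a DepresentRequest.
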
@@ -0,0 +1,172 @@
package utilities

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

func UtilityDiskReplicationUpdateStartStop(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	diskId := uint64(state.DiskId.ValueInt64())
	targetDiskId := uint64(state.ReplicationId.ValueInt64())

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start update for disk replica with ID: %d", diskId))

	ok := !(state.Start.IsNull() || state.Start.IsUnknown())
	start := state.Start.ValueBool()

	if ok && start {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
		req := disks.ReplicationStartRequest{
			DiskID:       diskId,
			TargetDiskID: targetDiskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationStart(ctx, req)
		if err != nil {
			diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to start disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
	}

	if ok && !start {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
		req := disks.ReplicationStopRequest{
			DiskID: targetDiskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationStop(ctx, req)
		if err != nil {
			diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to stop disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
	}
	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: complete update for disk replica with ID: %d", diskId))
	return nil
}

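// Note the asymmetry implemented above: starting replication addresses the
// source disk and names the replica via TargetDiskID, while stopping is
// issued against the replica itself. Sketch (hypothetical IDs, not part of
// this commit):
//
//	start: disks.ReplicationStartRequest{DiskID: 100, TargetDiskID: 200} // source -> replica
//	stop:  disks.ReplicationStopRequest{DiskID: 200}                     // addressed to the replica
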
func UtilityDiskReplicationUpdatePause(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	diskId := uint64(state.DiskId.ValueInt64())
	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: start update for disk replica with ID: %d", diskId))
	pause := state.Pause.ValueBool()
	ok := !(state.Pause.IsNull() || state.Pause.IsUnknown())

	if ok && pause {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d", diskId))
		req := disks.ReplicationSuspendRequest{
			DiskID: diskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationSuspend(ctx, req)
		if err != nil {
			diags.AddError("utilityDiskReplicationUpdatePause: Unable to pause disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d, complete", diskId))
	}

	if ok && !pause {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d", diskId))
		req := disks.ReplicationResumeRequest{
			DiskID: diskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationResume(ctx, req)
		if err != nil {
			diags.AddError("utilityDiskReplicationUpdatePause: Unable to resume disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d, complete", diskId))
	}

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: complete update for disk replica with ID: %d", diskId))
	return nil
}

func UtilityDiskReplicationUpdateReverse(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}
	diskId := uint64(state.DiskId.ValueInt64())
	targetDiskId := uint64(state.ReplicationId.ValueInt64())

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: start update for disk replica with ID: %d", diskId))

	reverse := state.Reverse.ValueBool()
	ok := !(state.Reverse.IsNull() || state.Reverse.IsUnknown())

	if ok && reverse {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
		req := disks.ReplicationReverseRequest{
			DiskID: diskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationReverse(ctx, req)
		if err != nil {
			diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
	}

	if ok && !reverse {
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
		req := disks.ReplicationReverseRequest{
			DiskID: targetDiskId,
		}
		_, err := c.CloudBroker().Disks().ReplicationReverse(ctx, req)
		if err != nil {
			diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk replication", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
	}

	tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateReverse: complete update for disk replica with ID: %d", diskId))
	return nil
}

// ResourceDiskReplicationCheckPresence loads the disk record together with its
// current replication status.
func ResourceDiskReplicationCheckPresence(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
	status, err := c.CloudBroker().Disks().ReplicationStatus(ctx, disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())})
	if err != nil {
		return nil, nil, err
	}

	req := disks.GetRequest{}

	if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
		req.DiskID = uint64(state.DiskId.ValueInt64())
	} else {
		req.DiskID = uint64(state.ReplicationId.ValueInt64())
	}

	tflog.Info(ctx, "ResourceDiskReplicationCheckPresence: load disk")
	disk, err := c.CloudBroker().Disks().Get(ctx, req)
	if err != nil {
		return nil, nil, err
	}

	return disk, &status, nil
}

// ReplicationDiskReadStatus loads the disk resource by its ID and checks its current status.
// In case of failure returns errors.
func ReplicationDiskReadStatus(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "ReplicationDiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.DiskId.ValueInt64()})

	diags := diag.Diagnostics{}

	_, _, err := ResourceDiskReplicationCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("ReplicationDiskReadStatus: Unable to Read Disk before status check", err.Error())
		return diags
	}

	return nil
}

@@ -0,0 +1,94 @@
package utilities

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)

// DiskSnapshotCheckPresence checks if a disk snapshot exists.
func DiskSnapshotCheckPresence(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
	diags := diag.Diagnostics{}

	// take diskId and label from plan
	diskId := uint64(plan.DiskID.ValueInt64())
	label := plan.Label.ValueString()

	// take diskId and label from Id for imported resource
	if strings.Contains(plan.Id.ValueString(), "#") {
		diskIdInt, err := strconv.Atoi(strings.Split(plan.Id.ValueString(), "#")[0])
		if err != nil {
			diags.AddError("Cannot parse disk ID from state", err.Error())
			return nil, diags
		}
		diskId = uint64(diskIdInt)
		label = strings.Split(plan.Id.ValueString(), "#")[1]
	}

	tflog.Info(ctx, "Start DiskSnapshotCheckPresence", map[string]any{
		"disk_id": diskId,
		"label":   label,
		"id":      plan.Id.ValueString(),
	})

	tflog.Info(ctx, "DiskSnapshotCheckPresence: before call CloudBroker().Disks().Get", map[string]any{"disk_id": diskId})
	disk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
	if err != nil {
		diags.AddError(
			fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
			err.Error(),
		)
		return nil, diags
	}
	tflog.Info(ctx, "DiskSnapshotCheckPresence: response from CloudBroker().Disks().Get", map[string]any{"response": disk})

	for _, sn := range disk.Snapshots {
		if label == sn.Label {
			return &sn, nil
		}
	}

	diags.AddError(
		"Snapshot not found",
		fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
	)
	return nil, diags
}

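// Sketch of the import ID convention parsed above (hypothetical values, not
// part of this commit): the ID is assumed to be "<disk_id>#<label>".
//
//	id := "410#daily-backup"
//	parts := strings.Split(id, "#") // ["410", "daily-backup"]
//	diskId, _ := strconv.Atoi(parts[0])
//	label := parts[1]
//
// A label that itself contains '#' would be truncated by this scheme;
// strings.SplitN with a limit of 2 would tolerate it, at the cost of
// changing the contract.
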
// RollbackDiskSnapshot rolls back a disk snapshot.
// Returns errors in case of failures.
func RollbackDiskSnapshot(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
	diags := diag.Diagnostics{}

	rollbackReq := disks.SnapshotRollbackRequest{
		DiskID: uint64(plan.DiskID.ValueInt64()),
		Label:  plan.Label.ValueString(),
	}

	if !plan.TimeStamp.IsUnknown() {
		rollbackReq.TimeStamp = uint64(plan.TimeStamp.ValueInt64())
	}

	tflog.Info(ctx, "RollbackDiskSnapshot: before calling CloudBroker().Disks().SnapshotRollback", map[string]any{"req": rollbackReq})
	res, err := c.CloudBroker().Disks().SnapshotRollback(ctx, rollbackReq)
	if err != nil {
		diags.AddError(
			"RollbackDiskSnapshot: Cannot rollback snapshot",
			err.Error(),
		)
		return diags
	}
	tflog.Info(ctx, "RollbackDiskSnapshot: response from CloudBroker().Disks().SnapshotRollback", map[string]any{
		"disk_id":  plan.DiskID.ValueInt64(),
		"label":    plan.Label.ValueString(),
		"response": res})

	return nil
}
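
// Request shapes produced above (hypothetical values, not part of this
// commit): TimeStamp is attached only when the plan value is known.
//
//	disks.SnapshotRollbackRequest{DiskID: 410, Label: "daily-backup"}
//	disks.SnapshotRollbackRequest{DiskID: 410, Label: "daily-backup", TimeStamp: 1700000000}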