Commit 76ea459b3d by asteam, 2024-12-04 13:18:58 +03:00 (parent 003e4d656e)
417 changed files with 30051 additions and 975 deletions

View File

@@ -49,9 +49,8 @@ type ResourceAccountModel struct {
 }
 type UsersModel struct {
-	UserID          types.String `tfsdk:"user_id"`
-	AccessType      types.String `tfsdk:"access_type"`
-	RecursiveDelete types.Bool   `tfsdk:"recursive_delete"`
+	UserID     types.String `tfsdk:"user_id"`
+	AccessType types.String `tfsdk:"access_type"`
 }
 type ResourceLimitsInAccountResourceModel struct {

View File

@@ -46,10 +46,6 @@ func MakeSchemaResourceAccount() map[string]schema.Attribute {
 				"access_type": schema.StringAttribute{
 					Required: true,
 				},
-				"recursive_delete": schema.BoolAttribute{
-					Optional: true,
-					// default is false
-				},
 			},
 		},
 	},
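For reference, the removed attribute relied on a bare comment to document its default; a sketch of how an explicit false default would be declared with the plugin framework's booldefault package (not part of this commit, and the surrounding attribute map is assumed):

// Hypothetical alternative, not in this commit: an explicit default via
// github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault.
// A defaulted attribute must also be Computed in the plugin framework.
"recursive_delete": schema.BoolAttribute{
	Optional: true,
	Computed: true,
	Default:  booldefault.StaticBool(false),
},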

View File

@@ -451,9 +451,8 @@ func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *m
 	for _, user := range deletedUsers {
 		delUserReq := account.DeleteUserRequest{
-			AccountID:       accountId,
-			UserName:        user.UserID.ValueString(),
-			RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false
+			AccountID: accountId,
+			UserName:  user.UserID.ValueString(),
 		}
tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq})

View File

@@ -29,7 +29,6 @@ func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *deco
 		AuditGuid: state.AuditGuid,
 		Timeouts:  state.Timeouts,
 		Apitask:   types.StringValue(recordAudit.Apitask),
-		Arguments: types.StringValue(recordAudit.Arguments),
 		Call:      types.StringValue(recordAudit.Call),
 		GUID:      types.StringValue(recordAudit.GUID),

View File

@@ -11,7 +11,6 @@ type DataSourceAudit struct {
 	Timeouts timeouts.Value `tfsdk:"timeouts"`
 	//response field
 	Apitask   types.String `tfsdk:"apitask"`
-	Arguments types.String `tfsdk:"args"`
 	Call      types.String `tfsdk:"call"`
 	GUID      types.String `tfsdk:"guid"`

View File

@@ -9,9 +9,6 @@ func MakeSchemaDataSourceAudit() map[string]schema.Attribute {
 		"audit_guid": schema.StringAttribute{
 			Required: true,
 		},
 		"apitask": schema.StringAttribute{
 			Computed: true,
 		},
-		"args": schema.StringAttribute{
-			Computed: true,
-		},

View File

@@ -0,0 +1,91 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDisk{}
)
func NewDataSourceDisk() datasource.DataSource {
return &dataSourceDisk{}
}
// dataSourceDisk is the data source implementation.
type dataSourceDisk struct {
client *decort.DecortClient
}
func (d *dataSourceDisk) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DiskModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDisk: Error get state")
return
}
diskId := uint64(state.DiskID.ValueInt64())
tflog.Info(ctx, "Read dataSourceDisk: got state successfully", map[string]any{"disk_id": diskId})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDisk: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDisk: set timeouts successfully", map[string]any{
"disk_id": diskId,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDisk: Error flatten data source disk")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDisk: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDisk", map[string]any{"disk_id": diskId})
}
func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDisk(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDisk) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDisk) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDisk")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDisk successfully")
}
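A minimal sketch of how these new cloudbroker disk data sources would typically be registered with the plugin framework provider; the provider type name and the import alias for the cbDisks package are assumptions, not part of this file:

// Sketch only: DynamixProvider is a hypothetical name for the provider type.
func (p *DynamixProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
	return []func() datasource.DataSource{
		cbDisks.NewDataSourceDisk,
		cbDisks.NewdataSourceDiskList,
		cbDisks.NewDataSourceDiskListDeleted,
		cbDisks.NewDataSourceDiskListTypes,
		cbDisks.NewDataSourceDiskListTypesDetailed,
		cbDisks.NewDataSourceDiskListUnattached,
		cbDisks.NewDataSourceDiskReplication,
		cbDisks.NewDataSourceDiskSnapshot,
		cbDisks.NewDataSourceDiskSnapshotList,
		// ...data sources registered elsewhere in the provider
	}
}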

View File

@@ -0,0 +1,88 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskList{}
)
func NewdataSourceDiskList() datasource.DataSource {
return &dataSourceDiskList{}
}
// dataSourceDiskList is the data source implementation.
type dataSourceDiskList struct {
client *decort.DecortClient
}
func (d *dataSourceDiskList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DisksModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceDiskList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskList: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskList")
}
func (d *dataSourceDiskList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskList successfully")
}

View File

@@ -0,0 +1,88 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskListDeleted{}
)
func NewDataSourceDiskListDeleted() datasource.DataSource {
return &dataSourceDiskListDeleted{}
}
// dataSourceDiskListDeleted is the data source implementation.
type dataSourceDiskListDeleted struct {
client *decort.DecortClient
}
func (d *dataSourceDiskListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.ListDisksModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceDiskListDeleted: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskListDeletedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskListDeleted")
}
func (d *dataSourceDiskListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskListDeleted(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_list_deleted"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskListDeleted")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskListDeleted successfully")
}

View File

@@ -0,0 +1,88 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskListTypes{}
)
func NewDataSourceDiskListTypes() datasource.DataSource {
return &dataSourceDiskListTypes{}
}
// dataSourceDiskListTypes is the data source implementation.
type dataSourceDiskListTypes struct {
client *decort.DecortClient
}
func (d *dataSourceDiskListTypes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceDiskListTypesModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypes: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceDiskListTypes: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskListTypes: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskListTypesDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypes: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskListTypes")
}
func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskListTypes(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskListTypes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskListTypes")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskListTypes successfully")
}

View File

@@ -0,0 +1,88 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskListTypesDetailed{}
)
func NewDataSourceDiskListTypesDetailed() datasource.DataSource {
return &dataSourceDiskListTypesDetailed{}
}
// dataSourceDiskListTypesDetailed is the data source implementation.
type dataSourceDiskListTypesDetailed struct {
client *decort.DecortClient
}
func (d *dataSourceDiskListTypesDetailed) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceDiskListTypesDetailedModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskListTypesDetailedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskListTypesDetailed")
}
func (d *dataSourceDiskListTypesDetailed) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskListTypesDetailed(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskListTypesDetailed) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types_detailed"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListTypesDetailed) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskListTypesDetailed")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskListTypesDetailed successfully")
}

View File

@@ -0,0 +1,88 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskListUnattached{}
)
func NewDataSourceDiskListUnattached() datasource.DataSource {
return &dataSourceDiskListUnattached{}
}
// dataSourceDiskListUnattached is the data source implementation.
type dataSourceDiskListUnattached struct {
client *decort.DecortClient
}
func (d *dataSourceDiskListUnattached) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceDiskListUnattachedModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceDiskListUnattached: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskListUnattached: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskListUnattachedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskListUnattached")
}
func (d *dataSourceDiskListUnattached) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskListUnattached(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskListUnattached) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_list_unattached"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskListUnattached) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskListUnattached")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskListUnattached successfully")
}

View File

@@ -0,0 +1,91 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskReplication{}
)
func NewDataSourceDiskReplication() datasource.DataSource {
return &dataSourceDiskReplication{}
}
// dataSourceDiskReplication is the data source implementation.
type dataSourceDiskReplication struct {
client *decort.DecortClient
}
func (d *dataSourceDiskReplication) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.RecordDiskModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskReplication: Error get state")
return
}
diskId := uint64(state.DiskId.ValueInt64())
tflog.Info(ctx, "Read dataSourceDiskReplication: got state successfully", map[string]any{"disk_id": diskId})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskReplication: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskReplication: set timeouts successfully", map[string]any{
"disk_id": diskId,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskReplicationDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskReplication: Error flatten data source disk")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskReplication: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskReplication", map[string]any{"disk_id": diskId})
}
func (d *dataSourceDiskReplication) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskReplication(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskReplication) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskReplication) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDisk")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDisk successfully")
}

View File

@@ -0,0 +1,96 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskSnapshot{}
)
func NewDataSourceDiskSnapshot() datasource.DataSource {
return &dataSourceDiskSnapshot{}
}
// dataSourceDiskSnapshot is the data source implementation.
type dataSourceDiskSnapshot struct {
client *decort.DecortClient
}
func (d *dataSourceDiskSnapshot) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceDiskSnapshotModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error get state")
return
}
ctxSnpsht := map[string]any{
"disk_id": uint64(state.DiskID.ValueInt64()),
"label": state.Label.ValueString(),
}
tflog.Info(ctx, "Read dataSourceDiskSnapshot: got state successfully", ctxSnpsht)
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskSnapshot: set timeouts successfully", map[string]any{
"disk_id": uint64(state.DiskID.ValueInt64()),
"label": state.Label.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskSnapshotDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskSnapshot", ctxSnpsht)
}
func (d *dataSourceDiskSnapshot) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskSnapshot(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskSnapshot) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshot) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskSnapshot")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshot successfully")
}

View File

@@ -0,0 +1,91 @@
package cbDisks
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceDiskSnapshotList{}
)
func NewDataSourceDiskSnapshotList() datasource.DataSource {
return &dataSourceDiskSnapshotList{}
}
// dataSourceDiskSnapshotList is the data source implementation.
type dataSourceDiskSnapshotList struct {
client *decort.DecortClient
}
func (d *dataSourceDiskSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceDiskSnapshotListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error get state")
return
}
diskId := uint64(state.DiskID.ValueInt64())
tflog.Info(ctx, "Read dataSourceDiskSnapshotList: got state successfully", map[string]any{"disk_id": diskId})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceDiskSnapshotList: set timeouts successfully", map[string]any{
"disk_id": diskId,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.DiskSnapshotListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error flatten data source")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceDiskSnapshotList", map[string]any{"disk_id": diskId})
}
func (d *dataSourceDiskSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceDiskSnapshotList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceDiskSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceDiskSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceDiskSnapshotList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshotList successfully")
}

View File

@@ -0,0 +1,119 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskDataSource flattens data source for disk.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskDataSource(ctx context.Context, state *models.DiskModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskDataSource")
diags := diag.Diagnostics{}
recordDisk, diags := utilities.DataSourceDiskCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskDataSource: before flatten")
diskAcl, _ := json.Marshal(recordDisk.ACL)
*state = models.DiskModel{
DiskID: state.DiskID,
Timeouts: state.Timeouts,
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
SEPType: types.StringValue(recordDisk.SEPType),
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
Computes: flattenComputeList(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
DiskPath: types.StringValue(recordDisk.DiskPath),
DeviceName: types.StringValue(recordDisk.DeviceName),
GID: types.Int64Value(int64(recordDisk.GID)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
IQN: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
Name: types.StringValue(recordDisk.Name),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Password: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SEPID: types.Int64Value(int64(recordDisk.SEPID)),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: flattenSnapshotList(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
state.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.Images to state.Images", diags))
}
state.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.PresentTo to state.PresentTo", diags))
}
iotune := models.IOModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
state.IOTune = &iotune
repl := models.ItemReplModel{
DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)),
PoolID: types.StringValue(recordDisk.Replication.PoolID),
Role: types.StringValue(recordDisk.Replication.Role),
SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
StorageID: types.StringValue(recordDisk.Replication.StorageID),
VolumeID: types.StringValue(recordDisk.Replication.VolumeID),
}
state.Replication = &repl
tflog.Info(ctx, "flattens.DiskDataSource: end flatten")
return nil
}
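Note that the conversion diagnostics from types.ListValueFrom are only logged above, never merged into the returned diagnostics. A hypothetical helper for package flattens (not in this commit; it reuses the imports already present in this file) that accumulates them instead would look like this:

// Hypothetical helper: convert a string slice to a types.List and merge any
// conversion diagnostics into the caller's diagnostics instead of dropping them.
func listFromStrings(ctx context.Context, diags *diag.Diagnostics, src []string) types.List {
	l, d := types.ListValueFrom(ctx, types.StringType, src)
	diags.Append(d...)
	return l
}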

View File

@@ -0,0 +1,180 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskListDataSource flattens data source for disk list.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskListDataSource(ctx context.Context, state *models.DisksModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskListDataSource")
diags := diag.Diagnostics{}
diskList, diags := utilities.DataSourceDiskListCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskListDataSource: before flatten")
*state = models.DisksModel{
ByID: state.ByID,
Name: state.Name,
AccountName: state.AccountName,
DiskMaxSize: state.DiskMaxSize,
Shared: state.Shared,
AccountID: state.AccountID,
Type: state.Type,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
// computed fields
EntryCount: types.Int64Value(int64(diskList.EntryCount)),
}
items := make([]models.DiskItemModel, 0, diskList.EntryCount)
for _, recordDisk := range diskList.Data {
diskAcl, _ := json.Marshal(recordDisk.ACL)
d := models.DiskItemModel{
MachineID: types.Int64Value(int64(recordDisk.MachineID)),
MachineName: types.StringValue(recordDisk.MachineName),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
SEPType: types.StringValue(recordDisk.SEPType),
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
Computes: flattenComputeList(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
DiskPath: types.StringValue(recordDisk.DiskPath),
DeviceName: types.StringValue(recordDisk.DeviceName),
GID: types.Int64Value(int64(recordDisk.GID)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
ID: types.Int64Value(int64(recordDisk.ID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
IQN: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
Name: types.StringValue(recordDisk.Name),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Password: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SEPID: types.Int64Value(int64(recordDisk.SEPID)),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: flattenSnapshotList(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.Images to d.Images", diags))
}
d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
}
iotune := models.IOModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
d.IOTune = &iotune
repl := models.ItemReplModel{
DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)),
PoolID: types.StringValue(recordDisk.Replication.PoolID),
Role: types.StringValue(recordDisk.Replication.Role),
SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
StorageID: types.StringValue(recordDisk.Replication.StorageID),
VolumeID: types.StringValue(recordDisk.Replication.VolumeID),
}
d.Replication = &repl
items = append(items, d)
}
state.Items = items
tflog.Info(ctx, "flattens.DiskListDataSource: end flatten")
return nil
}
func flattenComputeList(ctx context.Context, computes map[string]string) []models.ComputeModel {
tflog.Info(ctx, "Start flattenComputeList")
res := make([]models.ComputeModel, 0, len(computes))
for k, v := range computes {
temp := models.ComputeModel{
ComputeID: types.StringValue(k),
ComputeName: types.StringValue(v),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenComputeList")
return res
}
func flattenSnapshotList(ctx context.Context, snapshots disks.ListSnapshots) []models.SnapshotModel {
tflog.Info(ctx, "Start flattenSnapshotList")
res := make([]models.SnapshotModel, 0, len(snapshots))
for _, item := range snapshots {
temp := models.SnapshotModel{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
Timestamp: types.Int64Value(int64(item.Timestamp)),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenSnapshotList")
return res
}
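Since Go randomizes map iteration order, flattenComputeList can return the computes in a different order on every read. A deterministic variant (a sketch, not part of this commit; it additionally needs "sort" in the import block) would sort the keys first:

// Sketch: deterministic ordering for the flattened computes list.
func flattenComputeListSorted(ctx context.Context, computes map[string]string) []models.ComputeModel {
	keys := make([]string, 0, len(computes))
	for k := range computes {
		keys = append(keys, k)
	}
	sort.Strings(keys) // requires "sort" in the import block
	res := make([]models.ComputeModel, 0, len(computes))
	for _, k := range keys {
		res = append(res, models.ComputeModel{
			ComputeID:   types.StringValue(k),
			ComputeName: types.StringValue(computes[k]),
		})
	}
	return res
}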

View File

@@ -0,0 +1,180 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskListDeletedDataSource flattens data source for disk list deleted.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskListDeletedDataSource(ctx context.Context, state *models.ListDisksModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskListDeletedDataSource")
diags := diag.Diagnostics{}
diskList, diags := utilities.DataSourceDiskListDeletedCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskListDeletedDataSource: before flatten")
*state = models.ListDisksModel{
ByID: state.ByID,
Name: state.Name,
AccountName: state.AccountName,
DiskMaxSize: state.DiskMaxSize,
Shared: state.Shared,
AccountID: state.AccountID,
Type: state.Type,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
// computed fields
EntryCount: types.Int64Value(int64(diskList.EntryCount)),
}
items := make([]models.ItemDiskModel, 0, diskList.EntryCount)
for _, recordDisk := range diskList.Data {
diskAcl, _ := json.Marshal(recordDisk.ACL)
d := models.ItemDiskModel{
MachineID: types.Int64Value(int64(recordDisk.MachineID)),
MachineName: types.StringValue(recordDisk.MachineName),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
SEPType: types.StringValue(recordDisk.SEPType),
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
Computes: flattenComputes(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
DiskPath: types.StringValue(recordDisk.DiskPath),
DeviceName: types.StringValue(recordDisk.DeviceName),
GID: types.Int64Value(int64(recordDisk.GID)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
ID: types.Int64Value(int64(recordDisk.ID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
IQN: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
Name: types.StringValue(recordDisk.Name),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Password: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SEPID: types.Int64Value(int64(recordDisk.SEPID)),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.Images to d.Images", diags))
}
d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
}
iotune := models.IOTuneModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
d.IOTune = &iotune
repl := models.ItemReplicationModel{
DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)),
PoolID: types.StringValue(recordDisk.Replication.PoolID),
Role: types.StringValue(recordDisk.Replication.Role),
SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
StorageID: types.StringValue(recordDisk.Replication.StorageID),
VolumeID: types.StringValue(recordDisk.Replication.VolumeID),
}
d.Replication = &repl
items = append(items, d)
}
state.Items = items
tflog.Info(ctx, "flattens.DiskListDeletedDataSource: end flatten")
return nil
}
func flattenComputes(ctx context.Context, computes map[string]string) []models.ComputesModel {
tflog.Info(ctx, "Start flattenComputes")
res := make([]models.ComputesModel, 0, len(computes))
for k, v := range computes {
temp := models.ComputesModel{
ComputeID: types.StringValue(k),
ComputeName: types.StringValue(v),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenComputes")
return res
}
func flattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) []models.ItemSnapshotModel {
tflog.Info(ctx, "Start flattenSnapshots")
res := make([]models.ItemSnapshotModel, 0, len(snapshots))
for _, item := range snapshots {
temp := models.ItemSnapshotModel{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
Timestamp: types.Int64Value(int64(item.Timestamp)),
}
res = append(res, temp)
}
tflog.Info(ctx, "End flattenSnapshots")
return res
}

View File

@@ -0,0 +1,50 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskListTypesDataSource flattens data source for disk list types.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskListTypesDataSource(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskListTypesDataSource")
diags := diag.Diagnostics{}
listTypes, diags := utilities.DataSourceDiskListTypesCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskListTypesDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceDiskListTypesModel{
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
// computed fields
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
}
state.Items, diags = types.ListValueFrom(ctx, types.StringType, listTypes.Data)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDataSource: cannot flatten listTypes.Data to state.Items", diags))
}
tflog.Info(ctx, "flattens.DiskListTypesDataSource: end flatten")
return nil
}

View File

@@ -0,0 +1,82 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskListTypesDetailedDataSource flattens data source for disk list types detailed.
// Return error in case data source is not found on the platform.
// Flatten errors are added to tflog.
func DiskListTypesDetailedDataSource(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskListTypesDetailedDataSource")
diags := diag.Diagnostics{}
listTypes, diags := utilities.DataSourceDiskListTypesDetailedCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceDiskListTypesDetailedModel{
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
// computed fields
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(listTypes.EntryCount)),
}
items := make([]models.ItemDiskTypeDetailedModel, 0, len(listTypes.Data))
for _, typeListDetailed := range listTypes.Data {
typeMap := typeListDetailed.(map[string]interface{})
t := models.ItemDiskTypeDetailedModel{
SepID: types.Int64Value(int64(typeMap["sepId"].(float64))),
SepName: types.StringValue(typeMap["sepName"].(string)),
}
var pools []models.ItemPoolModel
poolsTemp := typeMap["pools"].([]interface{})
for _, pool := range poolsTemp {
poolsMap := pool.(map[string]interface{})
p := models.ItemPoolModel{
Name: types.StringValue(poolsMap["name"].(string)),
System: types.StringValue(poolsMap["system"].(string)),
}
p.Types, diags = types.ListValueFrom(ctx, types.StringType, flattenTypes(poolsMap["types"].([]interface{})))
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDetailedDataSource: cannot flatten poolsMap[\"types\"] to p.Types", diags))
}
pools = append(pools, p)
}
t.Pools = pools
items = append(items, t)
}
state.Items = items
tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: end flatten")
return diags
}
func flattenTypes(typesInterface []interface{}) []string {
var typesList []string
for _, typ := range typesInterface {
typesList = append(typesList, typ.(string))
}
return typesList
}
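The bare type assertion in flattenTypes panics if the platform ever returns a non-string entry. A defensive variant, sketched here only as an alternative (the code above ships the asserting form), checks each element:

// flattenTypesSafe is a hypothetical variant of flattenTypes that skips
// entries which are not strings instead of panicking on the assertion.
func flattenTypesSafe(typesInterface []interface{}) []string {
	typesList := make([]string, 0, len(typesInterface))
	for _, typ := range typesInterface {
		if s, ok := typ.(string); ok {
			typesList = append(typesList, s)
		}
	}
	return typesList
}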

View File

@@ -0,0 +1,166 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskListUnattachedDataSource flattens the data source for the unattached disk list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskListUnattachedDataSource(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskListUnattachedDataSource")
diags := diag.Diagnostics{}
diskList, diags := utilities.DataSourceDiskListUnattachedCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceDiskListUnattachedModel{
ByID: state.ByID,
AccountName: state.AccountName,
DiskMaxSize: state.DiskMaxSize,
Status: state.Status,
AccountID: state.AccountID,
SepID: state.SepID,
PoolName: state.PoolName,
Type: state.Type,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
// computed fields
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(diskList.EntryCount)),
}
items := make([]models.ItemDiskUnattachedModel, 0, diskList.EntryCount)
for _, recordDisk := range diskList.Data {
diskAcl, _ := json.Marshal(recordDisk.ACL)
d := models.ItemDiskUnattachedModel{
CKey: types.StringValue(recordDisk.CKey),
Meta: flattens.Meta(ctx, recordDisk.Meta),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
DiskPath: types.StringValue(recordDisk.DiskPath),
GID: types.Int64Value(int64(recordDisk.GID)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
DiskId: types.Int64Value(int64(recordDisk.ID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
Iqn: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
DiskName: types.StringValue(recordDisk.Name),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Passwd: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SepID: types.Int64Value(int64(recordDisk.SEPID)),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: flattenUnattachedDiskSnapshots(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.Images to d.Images", diags))
}
d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags))
}
iotune := models.IOTuneModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListUnattachedDataSource iotune struct to obj", diags))
}
d.IOTune = obj
items = append(items, d)
}
state.Items = items
tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: end flatten")
return diags
}
func flattenUnattachedDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
tflog.Info(ctx, "Start flattenSnapshots")
tempSlice := make([]types.Object, 0, len(snapshots))
for _, item := range snapshots {
temp := models.DiskUnattachedItemSnapshotModel{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
Timestamp: types.Int64Value(int64(item.Timestamp)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots", diags))
}
tflog.Info(ctx, "End flattenSnapshots")
return res
}
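The object-list idiom used by flattenUnattachedDiskSnapshots and its siblings always pairs a tfsdk-tagged struct with a map[string]attr.Type whose keys mirror the tags; if the two drift apart, ObjectValueFrom reports diagnostics instead of a value. A stripped-down sketch with hypothetical names:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// pairModel and pairType are illustrative: every tfsdk tag on the struct
// must appear as a key in the attr.Type map with a matching type.
type pairModel struct {
	Key   types.String `tfsdk:"key"`
	Value types.Int64  `tfsdk:"value"`
}

var pairType = map[string]attr.Type{
	"key":   types.StringType,
	"value": types.Int64Type,
}

// flattenPairs shows the same two-step conversion the snapshot flattens use:
// struct -> types.Object per element, then []types.Object -> types.List.
func flattenPairs(ctx context.Context, in map[string]int64) (types.List, diag.Diagnostics) {
	var allDiags diag.Diagnostics
	objs := make([]types.Object, 0, len(in))
	for k, v := range in {
		obj, d := types.ObjectValueFrom(ctx, pairType, pairModel{
			Key:   types.StringValue(k),
			Value: types.Int64Value(v),
		})
		allDiags.Append(d...)
		objs = append(objs, obj)
	}
	res, d := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: pairType}, objs)
	allDiags.Append(d...)
	return res, allDiags
}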

View File

@@ -0,0 +1,167 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskReplicationDataSource flattens the disk replication data source.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskReplicationDataSource(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskReplicationDataSource")
diags := diag.Diagnostics{}
recordDisk, status, err := utilities.DataSourceDiskReplicationCheckPresence(ctx, state, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about disk"), err.Error())
return diags
}
tflog.Info(ctx, "flattens.DiskReplicationDataSource: before flatten")
diskAcl, _ := json.Marshal(recordDisk.ACL)
*state = models.RecordDiskModel{
DiskId: state.DiskId,
Timeouts: state.Timeouts,
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
Computes: flattenDRComputes(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
DeviceName: types.StringValue(recordDisk.DeviceName),
DiskPath: types.StringValue(recordDisk.DiskPath),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
GID: types.Int64Value(int64(recordDisk.GID)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
Images: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
Name: types.StringValue(recordDisk.Name),
IQN: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Passwd: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SepType: types.StringValue(recordDisk.SEPType),
SepID: types.Int64Value(int64(recordDisk.SEPID)),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: diskListFlattenSnapshots(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
StatusReplication: types.StringValue(*status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
iotune := models.DiskReplicationIOTune{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
state.IOTune = &iotune
itemReplication := models.ItemReplicationModel{
DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)),
PoolID: types.StringValue(recordDisk.Replication.PoolID),
Role: types.StringValue(recordDisk.Replication.Role),
SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
StorageID: types.StringValue(recordDisk.Replication.StorageID),
VolumeID: types.StringValue(recordDisk.Replication.VolumeID),
}
state.Replication = &itemReplication
tflog.Info(ctx, "flattens.ReplicationDiskDataSource: end flatten")
return nil
}
func flattenDRComputes(ctx context.Context, items map[string]string) types.List {
tflog.Info(ctx, "Start flattenDRComputes")
tempSlice := make([]types.Object, 0, len(items))
for id, name := range items {
temp := models.ItemComputeModel{
ComputeId: types.StringValue(id),
ComputeName: types.StringValue(name),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes", diags))
}
tflog.Info(ctx, "End flattenDRComputes")
return res
}
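flattenDRComputes ranges over a Go map, so the order of the resulting computes list varies between runs, since Go randomizes map iteration. If deterministic output were ever wanted, the keys could be sorted first; a hypothetical helper, assuming a sort import:

// sortedComputeIDs is illustrative: ranging over items directly, as the
// flattens in this file do, yields a different element order on every run.
func sortedComputeIDs(items map[string]string) []string {
	ids := make([]string, 0, len(items))
	for id := range items {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return ids
}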
func diskListFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
tflog.Info(ctx, "Start flattenSnapshots")
tempSlice := make([]types.Object, 0, len(snapshots))
for _, item := range snapshots {
temp := models.DiskReplicationItemSnapshot{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ReferenceID: types.StringValue(item.ReferenceID),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
TimeStamp: types.Int64Value(int64(item.Timestamp)),
}
obj, diags := types.ObjectValueFrom(ctx, models.DiskReplicationSnapshot, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.DiskReplicationSnapshot}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots", diags))
}
tflog.Info(ctx, "End diskListFlattenSnapshots")
return res
}

View File

@@ -0,0 +1,50 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskSnapshotDataSource flattens the data source for a disk snapshot.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskSnapshotDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskSnapshotDataSource")
diskId := uint64(state.DiskID.ValueInt64())
item, diags := utilities.DataSourceDiskSnapshotCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskSnapshotDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshot": item})
id := uuid.New()
*state = models.DataSourceDiskSnapshotModel{
DiskID: state.DiskID,
Label: state.Label,
Timeouts: state.Timeouts,
// computed fields
Id: types.StringValue(id.String()),
GUID: types.StringValue(item.GUID),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
TimeStamp: types.Int64Value(int64(item.Timestamp)),
}
tflog.Info(ctx, "flattens.DiskSnapshotDataSource: end flatten", map[string]any{
"disk_id": state.DiskID.ValueInt64(),
"label": state.Label.ValueString(),
})
return nil
}

View File

@@ -0,0 +1,61 @@
package flattens
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskSnapshotListDataSource flattens the data source for the disk snapshot list.
// Returns an error if the data source is not found on the platform.
// Flatten errors are logged via tflog.
func DiskSnapshotListDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskSnapshotListDataSource")
diags := diag.Diagnostics{}
diskId := uint64(state.DiskID.ValueInt64())
snapshots, err := utilities.DiskSnapshotListCheckPresence(ctx, diskId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about disk snapshot list with disk ID %v", diskId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshots": snapshots})
id := uuid.New()
*state = models.DataSourceDiskSnapshotListModel{
DiskID: state.DiskID,
Timeouts: state.Timeouts,
// computed fields
Id: types.StringValue(id.String()),
}
res := make([]models.DiskItemSnapshot, 0, len(*snapshots))
for _, item := range *snapshots {
temp := models.DiskItemSnapshot{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
ReferenceID: types.StringValue(item.ReferenceID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
TimeStamp: types.Int64Value(int64(item.Timestamp)),
}
res = append(res, temp)
}
state.Items = res
tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()})
return nil
}
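Callers can then select an entry out of the flattened items, for example by the same label the single-snapshot data source keys on; a hypothetical lookup helper over this model:

// findSnapshotByLabel is an illustrative helper: it returns the first
// flattened snapshot whose label matches, and false if none does.
func findSnapshotByLabel(items []models.DiskItemSnapshot, label string) (models.DiskItemSnapshot, bool) {
	for _, item := range items {
		if item.Label.ValueString() == label {
			return item, true
		}
	}
	return models.DiskItemSnapshot{}, false
}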

View File

@@ -0,0 +1,191 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskResource flattens the disk resource.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged via tflog.
func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskResource")
diags := diag.Diagnostics{}
diskId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("flattens.DiskResource: Cannot parse disk ID from state", err.Error())
return diags
}
recordDisk, err := utilities.DiskCheckPresence(ctx, diskId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.DiskResource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk})
diskAcl, _ := json.Marshal(recordDisk.ACL)
*plan = models.ResourceDiskModel{
// required fields
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
DiskName: types.StringValue(recordDisk.Name),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
GID: types.Int64Value(int64(recordDisk.GID)),
// optional fields
SSDSize: plan.SSDSize,
NodeIDs: plan.NodeIDs,
Description: plan.Description,
Pool: plan.Pool,
SEPID: plan.SEPID,
Type: plan.Type,
Detach: plan.Detach,
Permanently: plan.Permanently,
Shareable: plan.Shareable,
Timeouts: plan.Timeouts,
// computed fields
LastUpdated: plan.LastUpdated,
Id: types.StringValue(strconv.Itoa(int(recordDisk.ID))),
DiskId: types.Int64Value(int64(recordDisk.ID)),
AccountName: types.StringValue(recordDisk.AccountName),
ACL: types.StringValue(string(diskAcl)),
Computes: flattenResourceComputes(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
DeviceName: types.StringValue(recordDisk.DeviceName),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SepType: types.StringValue(recordDisk.SEPType),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: flattenResourceDiskSnapshots(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
TechStatus: types.StringValue(recordDisk.TechStatus),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
plan.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.Images to plan.Images", diags))
}
plan.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.PresentTo to plan.PresentTo", diags))
}
if plan.Description.IsUnknown() {
plan.Description = types.StringValue(recordDisk.Description)
}
if plan.Pool.IsUnknown() {
plan.Pool = types.StringValue(recordDisk.Pool)
}
if plan.SEPID.IsUnknown() {
plan.SEPID = types.Int64Value(int64(recordDisk.SEPID))
}
if plan.Shareable.IsUnknown() {
plan.Shareable = types.BoolValue(recordDisk.Shareable)
}
if plan.Type.IsUnknown() {
plan.Type = types.StringValue(recordDisk.Type)
}
iotune := models.ResourceIOTuneModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattens.DiskResource iotune struct to obj", diags))
}
plan.IOTune = obj
tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{"disk_id": plan.Id.ValueString()})
tflog.Info(ctx, "End flattens.DiskResource")
return nil
}
func flattenResourceComputes(ctx context.Context, items map[string]string) types.List {
tflog.Info(ctx, "Start flattenResourceComputes")
tempSlice := make([]types.Object, 0, len(items))
for id, name := range items {
temp := models.ItemComputeModel{
ComputeId: types.StringValue(id),
ComputeName: types.StringValue(name),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes", diags))
}
tflog.Info(ctx, "End flattenComputes")
return res
}
func flattenResourceDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
tflog.Info(ctx, "Start flattenResourceDiskSnapshots")
tempSlice := make([]types.Object, 0, len(snapshots))
for _, item := range snapshots {
temp := models.ItemSnapshotsModel{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
TimeStamp: types.Int64Value(int64(item.Timestamp)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshots, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshots}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots", diags))
}
tflog.Info(ctx, "End flattenResourceDiskSnapshots")
return res
}

View File

@@ -0,0 +1,184 @@
package flattens
import (
"context"
"encoding/json"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskReplicationResource flattens the disk replication resource.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged via tflog.
func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskReplicationresource")
diags := diag.Diagnostics{}
recordDisk, status, err := utilities.ResourceDiskReplicationCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("Cannot get info about disk", err.Error())
return diags
}
tflog.Info(ctx, "flattens.DiskReplicationresource: before flatten")
diskAcl, _ := json.Marshal(recordDisk.ACL)
*state = models.ResourceRecordDiskReplicationModel{
DiskId: state.DiskId,
Name: state.Name,
SepID: state.SepID,
ReplicationId: state.ReplicationId,
Timeouts: state.Timeouts,
PoolName: state.PoolName,
Pause: state.Pause,
Reverse: state.Reverse,
Start: state.Start,
Detach: state.Detach,
Permanently: state.Permanently,
Id: types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
ACL: types.StringValue(string(diskAcl)),
BootPartition: types.Int64Value(int64(recordDisk.BootPartition)),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),
AccountName: types.StringValue(recordDisk.AccountName),
Computes: flattenRComputes(ctx, recordDisk.Computes),
CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)),
DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)),
DeviceName: types.StringValue(recordDisk.DeviceName),
DiskPath: types.StringValue(recordDisk.DiskPath),
Description: types.StringValue(recordDisk.Description),
DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)),
GUID: types.Int64Value(int64(recordDisk.GUID)),
GID: types.Int64Value(int64(recordDisk.GID)),
ImageID: types.Int64Value(int64(recordDisk.ImageID)),
Images: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
IQN: types.StringValue(recordDisk.IQN),
Login: types.StringValue(recordDisk.Login),
Milestones: types.Int64Value(int64(recordDisk.Milestones)),
Order: types.Int64Value(int64(recordDisk.Order)),
Params: types.StringValue(recordDisk.Params),
ParentID: types.Int64Value(int64(recordDisk.ParentID)),
Passwd: types.StringValue(recordDisk.Password),
PCISlot: types.Int64Value(int64(recordDisk.PCISlot)),
Pool: types.StringValue(recordDisk.Pool),
PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)),
PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)),
RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
ReferenceID: types.StringValue(recordDisk.ReferenceID),
ResID: types.StringValue(recordDisk.ResID),
ResName: types.StringValue(recordDisk.ResName),
Role: types.StringValue(recordDisk.Role),
SepType: types.StringValue(recordDisk.SEPType),
Shareable: types.BoolValue(recordDisk.Shareable),
SizeMax: types.Int64Value(int64(recordDisk.SizeMax)),
SizeUsed: types.Float64Value(recordDisk.SizeUsed),
Snapshots: replicationFlattenSnapshots(ctx, recordDisk.Snapshots),
Status: types.StringValue(recordDisk.Status),
StatusReplication: types.StringValue(*status),
TechStatus: types.StringValue(recordDisk.TechStatus),
Type: types.StringValue(recordDisk.Type),
VMID: types.Int64Value(int64(recordDisk.VMID)),
}
iotune := models.ResourceDiskReplicationIOTuneModel{
ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ResourceDiskReplicationIOTune, iotune)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource iotune struct to obj", diags))
}
state.IOTune = obj
itemReplication := models.ResourceItemReplicationModel{
DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)),
PoolID: types.StringValue(recordDisk.Replication.PoolID),
Role: types.StringValue(recordDisk.Replication.Role),
SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
StorageID: types.StringValue(recordDisk.Replication.StorageID),
VolumeID: types.StringValue(recordDisk.Replication.VolumeID),
}
obj, diags = types.ObjectValueFrom(ctx, models.ResourceItemReplication, itemReplication)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource Replication struct to obj", diags))
}
state.Replication = obj
tflog.Info(ctx, "flattens.ReplicationDiskresource: end flatten")
return nil
}
func flattenRComputes(ctx context.Context, items map[string]string) types.List {
tflog.Info(ctx, "Start flattenRComputes")
tempSlice := make([]types.Object, 0, len(items))
for id, name := range items {
temp := models.ResourceReplicationItemComputeModel{
ComputeId: types.StringValue(id),
ComputeName: types.StringValue(name),
}
obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemCompute, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenRComputes struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemCompute}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenRComputes", diags))
}
tflog.Info(ctx, "End flattenRComputes")
return res
}
func replicationFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
tflog.Info(ctx, "Start flattenSnapshots")
tempSlice := make([]types.Object, 0, len(snapshots))
for _, item := range snapshots {
temp := models.ResourceReplicationItemSnapshotModel{
GUID: types.StringValue(item.GUID),
Label: types.StringValue(item.Label),
ResID: types.StringValue(item.ResID),
SnapSetGUID: types.StringValue(item.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
TimeStamp: types.Int64Value(int64(item.Timestamp)),
}
obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemSnapshot, temp)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots struct to obj", diags))
}
tempSlice = append(tempSlice, obj)
}
res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemSnapshot}, tempSlice)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots", diags))
}
tflog.Info(ctx, "End flattenSnapshots")
return res
}

View File

@@ -0,0 +1,65 @@
package flattens
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// DiskSnapshotResource flattens the disk snapshot resource.
// Returns an error if the resource is not found on the platform.
// Flatten errors are logged via tflog.
func DiskSnapshotResource(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.DiskSnapshotResource", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString()})
recordSnapshot, diags := utilities.DiskSnapshotCheckPresence(ctx, plan, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.DiskSnapshotResource: before flatten", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
"recordDisk": recordSnapshot})
id := plan.Id
if !strings.Contains(id.ValueString(), "#") {
id = types.StringValue(fmt.Sprintf("%d#%s", plan.DiskID.ValueInt64(), plan.Label.ValueString()))
}
*plan = models.ResourceDiskSnapshotModel{
// required fields
DiskID: plan.DiskID,
Label: types.StringValue(recordSnapshot.Label),
// optional fields
Rollback: plan.Rollback,
TimeStamp: plan.TimeStamp,
Timeouts: plan.Timeouts,
// computed fields
Id: id,
GUID: types.StringValue(recordSnapshot.GUID),
ResID: types.StringValue(recordSnapshot.ResID),
SnapSetGUID: types.StringValue(recordSnapshot.SnapSetGUID),
SnapSetTime: types.Int64Value(int64(recordSnapshot.SnapSetTime)),
}
if plan.TimeStamp.IsUnknown() {
plan.TimeStamp = types.Int64Value(int64(recordSnapshot.Timestamp))
}
tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString()})
return nil
}
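The resource ID composed above follows a "<disk_id>#<label>" convention. A hypothetical inverse, e.g. for an import handler (illustrative only, and assuming strconv joins the file's imports):

// parseSnapshotID splits a "<disk_id>#<label>" composite ID back into its
// parts; it sketches the inverse of the ID built in the flatten above.
func parseSnapshotID(id string) (uint64, string, error) {
	parts := strings.SplitN(id, "#", 2)
	if len(parts) != 2 {
		return 0, "", fmt.Errorf("expected <disk_id>#<label>, got %q", id)
	}
	diskID, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return 0, "", fmt.Errorf("invalid disk_id in %q: %w", id, err)
	}
	return diskID, parts[1], nil
}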

View File

@@ -0,0 +1,122 @@
package cbDisks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
)
// resourceDiskReplicationInputChecks checks that the user-provided disk_id exists on the platform (and sits on a TATLIN SEP) before disk replication.
func resourceDiskReplicationInputChecks(ctx context.Context, plan *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
diskId := uint64(plan.DiskId.ValueInt64())
tflog.Info(ctx, "resourceDiskCreateInputChecks: exist disk check", map[string]any{"disk_id": diskId})
err := CheckTatlinDiskID(ctx, diskId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
}
return diags
}
// resourceDiskUpdateInputChecks checks that the user-provided account_id
// and gid exist on the platform, and that description, pool, sep_id and
// type are not being changed.
func resourceDiskUpdateInputChecks(ctx context.Context, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
// check accountId and gid
diags.Append(resourceDiskCreateInputChecks(ctx, plan, c)...)
// check description
if !plan.Description.Equal(state.Description) && !plan.Description.IsUnknown() {
diags.AddError(
"resourceDiskUpdateInputChecks: description change is not allowed",
fmt.Sprintf("cannot change description from %s to %s for disk id %s",
state.Description.ValueString(),
plan.Description.ValueString(),
plan.Id.ValueString()))
}
// check pool
if !plan.Pool.Equal(state.Pool) && !plan.Pool.IsUnknown() {
diags.AddError(
"resourceDiskUpdateInputChecks: pool change is not allowed",
fmt.Sprintf("cannot change pool from %s to %s for disk id %s",
state.Pool.ValueString(),
plan.Pool.ValueString(),
plan.Id.ValueString()))
}
// check sep_id
if !plan.SEPID.Equal(state.SEPID) && !plan.SEPID.IsUnknown() {
diags.AddError(
"resourceDiskUpdateInputChecks: sep_id change is not allowed",
fmt.Sprintf("cannot change sep_id from %d to %d for disk id %s",
state.SEPID.ValueInt64(),
plan.SEPID.ValueInt64(),
plan.Id.ValueString()))
}
// check type
if !plan.Type.Equal(state.Type) && !plan.Type.IsUnknown() {
diags.AddError(
"resourceDiskUpdateInputChecks: type change is not allowed",
fmt.Sprintf("cannot change type from %s to %s for disk id %s",
state.Type.ValueString(),
plan.Type.ValueString(),
plan.Id.ValueString()))
}
return diags
}
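The four blocks above repeat one shape and could collapse into a helper; a hypothetical generalization for the string-typed attributes (sep_id, being Int64, would need a twin, and the framework's types package would join the imports):

// checkImmutableString is an illustrative generalization of the checks
// above: it flags a changed, explicitly set string attribute as an error.
func checkImmutableString(diags *diag.Diagnostics, name string, plan, state types.String, id string) {
	if !plan.Equal(state) && !plan.IsUnknown() {
		diags.AddError(
			fmt.Sprintf("resourceDiskUpdateInputChecks: %s change is not allowed", name),
			fmt.Sprintf("cannot change %s from %s to %s for disk id %s",
				name, state.ValueString(), plan.ValueString(), id))
	}
}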
// resourceDiskCreateInputChecks checks that the user-provided account_id and gid exist on the platform during disk creation.
func resourceDiskCreateInputChecks(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
accountId := uint64(plan.AccountID.ValueInt64())
tflog.Info(ctx, "resourceDiskCreateInputChecks: exist account check", map[string]any{"account_id": accountId})
err := ic.ExistAccount(ctx, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error())
}
gid := uint64(plan.GID.ValueInt64())
tflog.Info(ctx, "resourceDiskCreateInputChecks: exist gid check", map[string]any{"gid": gid})
err = ic.ExistGID(ctx, gid, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about GID %v", gid), err.Error())
}
return diags
}
func CheckTatlinDiskID(ctx context.Context, diskId uint64, c *decort.DecortClient) error {
req := disks.ListRequest{
ByID: diskId,
}
diskList, err := c.CloudBroker().Disks().List(ctx, req)
if err != nil {
return err
}
if len(diskList.Data) == 0 {
return fmt.Errorf("DiskID %d is not allowed or does not exist", diskId)
}
if diskList.Data[0].SEPType != "TATLIN" {
return fmt.Errorf("DiskID %d is not allowed or does not exist", diskId)
}
return nil
}
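CheckTatlinDiskID hard-codes one SEP type; if other SEP families ever needed the same gate, the check generalizes naturally. A sketch, not shipped code:

// checkDiskSEPType is an illustrative generalization of CheckTatlinDiskID:
// it verifies the disk exists and sits on a SEP of the wanted type, keeping
// the same deliberately vague error message for both failure cases.
func checkDiskSEPType(ctx context.Context, diskId uint64, sepType string, c *decort.DecortClient) error {
	diskList, err := c.CloudBroker().Disks().List(ctx, disks.ListRequest{ByID: diskId})
	if err != nil {
		return err
	}
	if len(diskList.Data) == 0 || diskList.Data[0].SEPType != sepType {
		return fmt.Errorf("DiskID %d is not allowed or does not exist", diskId)
	}
	return nil
}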

View File

@@ -0,0 +1,61 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DiskModel struct {
// request fields
DiskID types.Int64 `tfsdk:"disk_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// computed fields
MachineID types.Int64 `tfsdk:"machine_id"`
MachineName types.String `tfsdk:"machine_name"`
DeviceName types.String `tfsdk:"devicename"`
SEPType types.String `tfsdk:"sep_type"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
Computes []ComputeModel `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
DiskPath types.String `tfsdk:"disk_path"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune *IOModel `tfsdk:"iotune"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Password types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
Replication *ItemReplModel `tfsdk:"replication"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SEPID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots []SnapshotModel `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}

View File

@@ -0,0 +1,116 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DisksModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountName types.String `tfsdk:"account_name"`
DiskMaxSize types.Int64 `tfsdk:"disk_max_size"`
Shared types.Bool `tfsdk:"shared"`
AccountID types.Int64 `tfsdk:"account_id"`
Type types.String `tfsdk:"type"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
//computed fields
Items []DiskItemModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type DiskItemModel struct {
MachineID types.Int64 `tfsdk:"machine_id"`
MachineName types.String `tfsdk:"machine_name"`
DeviceName types.String `tfsdk:"devicename"`
SEPType types.String `tfsdk:"sep_type"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
Computes []ComputeModel `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
DiskPath types.String `tfsdk:"disk_path"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.Int64 `tfsdk:"disk_id"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune *IOModel `tfsdk:"iotune"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Password types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
Replication *ItemReplModel `tfsdk:"replication"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SEPID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots []SnapshotModel `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
type ComputeModel struct {
ComputeID types.String `tfsdk:"compute_id"`
ComputeName types.String `tfsdk:"compute_name"`
}
type ItemReplModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
PoolID types.String `tfsdk:"pool_id"`
Role types.String `tfsdk:"role"`
SelfVolumeID types.String `tfsdk:"self_volume_id"`
StorageID types.String `tfsdk:"storage_id"`
VolumeID types.String `tfsdk:"volume_id"`
}
type IOModel struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
type SnapshotModel struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
Timestamp types.Int64 `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,116 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ListDisksModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
AccountName types.String `tfsdk:"account_name"`
DiskMaxSize types.Int64 `tfsdk:"disk_max_size"`
Shared types.Bool `tfsdk:"shared"`
AccountID types.Int64 `tfsdk:"account_id"`
Type types.String `tfsdk:"type"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
//computed fields
Items []ItemDiskModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemDiskModel struct {
MachineID types.Int64 `tfsdk:"machine_id"`
MachineName types.String `tfsdk:"machine_name"`
DeviceName types.String `tfsdk:"devicename"`
SEPType types.String `tfsdk:"sep_type"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
Computes []ComputesModel `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
DiskPath types.String `tfsdk:"disk_path"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ID types.Int64 `tfsdk:"disk_id"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune *IOTuneModel `tfsdk:"iotune"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Password types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
Replication *ItemReplicationModel `tfsdk:"replication"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SEPID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots []ItemSnapshotModel `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
type ComputesModel struct {
ComputeID types.String `tfsdk:"compute_id"`
ComputeName types.String `tfsdk:"compute_name"`
}
type ItemReplicationModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
PoolID types.String `tfsdk:"pool_id"`
Role types.String `tfsdk:"role"`
SelfVolumeID types.String `tfsdk:"self_volume_id"`
StorageID types.String `tfsdk:"storage_id"`
VolumeID types.String `tfsdk:"volume_id"`
}
type IOTuneModel struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
type ItemSnapshotModel struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
Timestamp types.Int64 `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,19 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceDiskListTypesModel struct {
// request fields - optional
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items types.List `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}

View File

@@ -0,0 +1,31 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceDiskListTypesDetailedModel struct {
// request fields - optional
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemDiskTypeDetailedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemDiskTypeDetailedModel struct {
Pools []ItemPoolModel `tfsdk:"pools"`
SepID types.Int64 `tfsdk:"sep_id"`
SepName types.String `tfsdk:"sep_name"`
}
type ItemPoolModel struct {
Name types.String `tfsdk:"name"`
System types.String `tfsdk:"system"`
Types types.List `tfsdk:"types"`
}

View File

@@ -0,0 +1,109 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceDiskListUnattachedModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
AccountName types.String `tfsdk:"account_name"`
DiskMaxSize types.Int64 `tfsdk:"disk_max_size"`
Status types.String `tfsdk:"status"`
AccountID types.Int64 `tfsdk:"account_id"`
SepID types.Int64 `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"pool_name"`
Type types.String `tfsdk:"type"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemDiskUnattachedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemDiskUnattachedModel struct {
CKey types.String `tfsdk:"ckey"`
Meta types.List `tfsdk:"meta"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
DiskPath types.String `tfsdk:"disk_path"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
DiskId types.Int64 `tfsdk:"disk_id"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune types.Object `tfsdk:"iotune"`
Iqn types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
DiskName types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Passwd types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SepID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots types.List `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
var ItemIOTune = map[string]attr.Type{
"read_bytes_sec": types.Int64Type,
"read_bytes_sec_max": types.Int64Type,
"read_iops_sec": types.Int64Type,
"read_iops_sec_max": types.Int64Type,
"size_iops_sec": types.Int64Type,
"total_bytes_sec": types.Int64Type,
"total_bytes_sec_max": types.Int64Type,
"total_iops_sec": types.Int64Type,
"total_iops_sec_max": types.Int64Type,
"write_bytes_sec": types.Int64Type,
"write_bytes_sec_max": types.Int64Type,
"write_iops_sec": types.Int64Type,
"write_iops_sec_max": types.Int64Type,
}
type DiskUnattachedItemSnapshotModel struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
Timestamp types.Int64 `tfsdk:"timestamp"`
}
var ItemSnapshot = map[string]attr.Type{
"guid": types.StringType,
"label": types.StringType,
"res_id": types.StringType,
"snap_set_guid": types.StringType,
"snap_set_time": types.Int64Type,
"timestamp": types.Int64Type,
}
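Maps like ItemIOTune also drive the reverse conversion: types.Object.As copies a populated object's attributes back into a tagged struct. A sketch, assuming IOTuneModel (declared elsewhere in this package) and an import of the framework's basetypes package:

// iotuneFromObject is an illustrative inverse of the flatten: it unpacks a
// populated iotune object back into the tfsdk-tagged IOTuneModel struct.
func iotuneFromObject(ctx context.Context, obj types.Object) (IOTuneModel, diag.Diagnostics) {
	var io IOTuneModel
	diags := obj.As(ctx, &io, basetypes.ObjectAsOptions{})
	return io, diags
}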

View File

@@ -0,0 +1,115 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type RecordDiskModel struct {
// request fields - required
DiskId types.Int64 `tfsdk:"disk_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
Computes types.List `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DeviceName types.String `tfsdk:"devicename"`
DiskPath types.String `tfsdk:"disk_path"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IOTune *DiskReplicationIOTune `tfsdk:"iotune"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"disk_name"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Passwd types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
Replication *ItemReplicationModel `tfsdk:"replication"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SepType types.String `tfsdk:"sep_type"`
SepID types.Int64 `tfsdk:"sep_id"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots types.List `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
StatusReplication types.String `tfsdk:"status_replication"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
type DiskReplicationIOTune struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
type ReplicationModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
PoolID types.String `tfsdk:"pool_id"`
Role types.String `tfsdk:"role"`
SelfVolumeID types.String `tfsdk:"self_volume_id"`
StorageID types.String `tfsdk:"storage_id"`
VolumeID types.String `tfsdk:"volume_id"`
}
type DiskReplicationItemSnapshot struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
}
var DiskReplicationSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"reference_id":  types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
	"snap_set_time": types.Int64Type,
	"timestamp":     types.Int64Type,
}
type ItemComputeModel struct {
ComputeId types.String `tfsdk:"compute_id"`
ComputeName types.String `tfsdk:"compute_name"`
}
var ItemCompute = map[string]attr.Type{
"compute_id": types.StringType,
"compute_name": types.StringType,
}
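// Illustrative sketch (assumption, not part of this commit): flattening the
// computes list typically wraps ItemCompute in a types.ObjectType and uses
// types.ListValueFrom:
//
//	computes, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: ItemCompute}, computeItems)
//
// where computeItems is an assumed []ItemComputeModel built from the API response.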

View File

@@ -0,0 +1,21 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceDiskSnapshotModel struct {
// request fields
DiskID types.Int64 `tfsdk:"disk_id"`
Label types.String `tfsdk:"label"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
GUID types.String `tfsdk:"guid"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
}

View File

@@ -0,0 +1,26 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceDiskSnapshotListModel struct {
// request fields
DiskID types.Int64 `tfsdk:"disk_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []DiskItemSnapshot `tfsdk:"items"`
}
type DiskItemSnapshot struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
}
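// Illustrative sketch (assumption, not part of this commit): the flatten for
// this data source would map each SDK snapshot record onto DiskItemSnapshot,
// for example:
//
//	items := make([]DiskItemSnapshot, 0, len(snapshots))
//	for _, s := range snapshots {
//		items = append(items, DiskItemSnapshot{
//			GUID:        types.StringValue(s.GUID),
//			Label:       types.StringValue(s.Label),
//			SnapSetTime: types.Int64Value(int64(s.SnapSetTime)),
//		})
//	}
//
// snapshots and its field names are assumed from the SDK response shape.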

View File

@@ -0,0 +1,127 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceDiskModel struct {
// request fields - required
AccountID types.Int64 `tfsdk:"account_id"`
DiskName types.String `tfsdk:"disk_name"`
SizeMax types.Int64 `tfsdk:"size_max"`
GID types.Int64 `tfsdk:"gid"`
// request fields - optional
SSDSize types.Int64 `tfsdk:"ssd_size"`
NodeIDs types.List `tfsdk:"node_ids"`
Description types.String `tfsdk:"desc"`
Pool types.String `tfsdk:"pool"`
SEPID types.Int64 `tfsdk:"sep_id"`
Type types.String `tfsdk:"type"`
Detach types.Bool `tfsdk:"detach"`
Permanently types.Bool `tfsdk:"permanently"`
Shareable types.Bool `tfsdk:"shareable"`
IOTune types.Object `tfsdk:"iotune"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
LastUpdated types.String `tfsdk:"last_updated"`
ACL types.String `tfsdk:"acl"`
AccountName types.String `tfsdk:"account_name"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
Computes types.List `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DeviceName types.String `tfsdk:"devicename"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
DiskId types.Int64 `tfsdk:"disk_id"`
DiskPath types.String `tfsdk:"disk_path"`
Guid types.Int64 `tfsdk:"guid"`
ImageID types.Int64 `tfsdk:"image_id"`
Images types.List `tfsdk:"images"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Passwd types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SepType types.String `tfsdk:"sep_type"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots types.List `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
VMID types.Int64 `tfsdk:"vmid"`
}
type ResourceIOTuneModel struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
type ItemSnapshotsModel struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
}
type ItemComputesModel struct {
ComputeId types.String `tfsdk:"compute_id"`
ComputeName types.String `tfsdk:"compute_name"`
}
var ItemComputes = map[string]attr.Type{
"compute_id": types.StringType,
"compute_name": types.StringType,
}
var ItemSnapshots = map[string]attr.Type{
"guid": types.StringType,
"label": types.StringType,
"res_id": types.StringType,
"snap_set_guid": types.StringType,
"snap_set_time": types.Int64Type,
"timestamp": types.Int64Type,
}
var ResourceItemIOTune = map[string]attr.Type{
"read_bytes_sec": types.Int64Type,
"read_bytes_sec_max": types.Int64Type,
"read_iops_sec": types.Int64Type,
"read_iops_sec_max": types.Int64Type,
"size_iops_sec": types.Int64Type,
"total_bytes_sec": types.Int64Type,
"total_bytes_sec_max": types.Int64Type,
"total_iops_sec": types.Int64Type,
"total_iops_sec_max": types.Int64Type,
"write_bytes_sec": types.Int64Type,
"write_bytes_sec_max": types.Int64Type,
"write_iops_sec": types.Int64Type,
"write_iops_sec_max": types.Int64Type,
}
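// Illustrative sketch (assumption, not part of this commit): the reverse
// direction, reading the planned iotune object back into ResourceIOTuneModel
// before building the limit request, is usually done with Object.As:
//
//	var ioTune ResourceIOTuneModel
//	diags := plan.IOTune.As(ctx, &ioTune, basetypes.ObjectAsOptions{})
//
// basetypes here is github.com/hashicorp/terraform-plugin-framework/types/basetypes.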

View File

@@ -0,0 +1,147 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceRecordDiskReplicationModel struct {
// request fields
DiskId types.Int64 `tfsdk:"disk_id"`
Name types.String `tfsdk:"disk_name"`
SepID types.Int64 `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"pool_name"`
Pause types.Bool `tfsdk:"pause"`
Reverse types.Bool `tfsdk:"reverse"`
Start types.Bool `tfsdk:"start"`
Detach types.Bool `tfsdk:"detach"`
Permanently types.Bool `tfsdk:"permanently"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
ACL types.String `tfsdk:"acl"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
Computes types.List `tfsdk:"computes"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
DeviceName types.String `tfsdk:"devicename"`
DiskPath types.String `tfsdk:"disk_path"`
Description types.String `tfsdk:"desc"`
DestructionTime types.Int64 `tfsdk:"destruction_time"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
ImageID types.Int64 `tfsdk:"image_id"`
ReplicationId types.Int64 `tfsdk:"replica_disk_id"`
Images types.List `tfsdk:"images"`
IOTune types.Object `tfsdk:"iotune"`
IQN types.String `tfsdk:"iqn"`
Login types.String `tfsdk:"login"`
Milestones types.Int64 `tfsdk:"milestones"`
Order types.Int64 `tfsdk:"order"`
Params types.String `tfsdk:"params"`
ParentID types.Int64 `tfsdk:"parent_id"`
Passwd types.String `tfsdk:"passwd"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
Pool types.String `tfsdk:"pool"`
PresentTo types.List `tfsdk:"present_to"`
PurgeAttempts types.Int64 `tfsdk:"purge_attempts"`
PurgeTime types.Int64 `tfsdk:"purge_time"`
Replication types.Object `tfsdk:"replication"`
RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"`
ReferenceID types.String `tfsdk:"reference_id"`
ResID types.String `tfsdk:"res_id"`
ResName types.String `tfsdk:"res_name"`
Role types.String `tfsdk:"role"`
SepType types.String `tfsdk:"sep_type"`
Shareable types.Bool `tfsdk:"shareable"`
SizeMax types.Int64 `tfsdk:"size_max"`
SizeUsed types.Float64 `tfsdk:"size_used"`
Snapshots types.List `tfsdk:"snapshots"`
Status types.String `tfsdk:"status"`
StatusReplication types.String `tfsdk:"status_replication"`
TechStatus types.String `tfsdk:"tech_status"`
Type types.String `tfsdk:"type"`
VMID types.Int64 `tfsdk:"vmid"`
}
type ResourceDiskReplicationIOTuneModel struct {
ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"`
ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"`
ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"`
ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"`
SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"`
TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"`
TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"`
TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"`
WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"`
WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"`
WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"`
}
var ResourceDiskReplicationIOTune = map[string]attr.Type{
"read_bytes_sec": types.Int64Type,
"read_bytes_sec_max": types.Int64Type,
"read_iops_sec": types.Int64Type,
"read_iops_sec_max": types.Int64Type,
"size_iops_sec": types.Int64Type,
"total_bytes_sec": types.Int64Type,
"total_bytes_sec_max": types.Int64Type,
"total_iops_sec": types.Int64Type,
"total_iops_sec_max": types.Int64Type,
"write_bytes_sec": types.Int64Type,
"write_bytes_sec_max": types.Int64Type,
"write_iops_sec": types.Int64Type,
"write_iops_sec_max": types.Int64Type,
}
type ResourceItemReplicationModel struct {
DiskID types.Int64 `tfsdk:"disk_id"`
PoolID types.String `tfsdk:"pool_id"`
Role types.String `tfsdk:"role"`
SelfVolumeID types.String `tfsdk:"self_volume_id"`
StorageID types.String `tfsdk:"storage_id"`
VolumeID types.String `tfsdk:"volume_id"`
}
var ResourceItemReplication = map[string]attr.Type{
"disk_id": types.Int64Type,
"pool_id": types.StringType,
"role": types.StringType,
"self_volume_id": types.StringType,
"storage_id:": types.StringType,
"volume_id": types.StringType,
}
type ResourceReplicationItemSnapshotModel struct {
GUID types.String `tfsdk:"guid"`
Label types.String `tfsdk:"label"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
}
var ResourceReplicationItemSnapshot = map[string]attr.Type{
"guid": types.StringType,
"label": types.StringType,
"res_id": types.StringType,
"snap_set_guid": types.StringType,
"snap_set_time": types.Int64Type,
"timestamp": types.Int64Type,
}
type ResourceReplicationItemComputeModel struct {
ComputeId types.String `tfsdk:"compute_id"`
ComputeName types.String `tfsdk:"compute_name"`
}
var ResourceReplicationItemCompute = map[string]attr.Type{
"compute_id": types.StringType,
"compute_name": types.StringType,
}

View File

@@ -0,0 +1,25 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceDiskSnapshotModel struct {
// request fields - required
DiskID types.Int64 `tfsdk:"disk_id"`
Label types.String `tfsdk:"label"`
// request fields - optional
Rollback types.Bool `tfsdk:"rollback"`
TimeStamp types.Int64 `tfsdk:"timestamp"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
LastUpdated types.String `tfsdk:"last_updated"`
GUID types.String `tfsdk:"guid"`
ResID types.String `tfsdk:"res_id"`
SnapSetGUID types.String `tfsdk:"snap_set_guid"`
SnapSetTime types.Int64 `tfsdk:"snap_set_time"`
}

View File

@@ -0,0 +1,370 @@
package cbDisks
import (
"context"
"reflect"
"strconv"
"time"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceDisk{}
_ resource.ResourceWithImportState = &resourceDisk{}
)
// NewResourceDisk is a helper function to simplify the provider implementation.
func NewResourceDisk() resource.Resource {
return &resourceDisk{}
}
// resourceDisk is the resource implementation.
type resourceDisk struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk
var plan models.ResourceDiskModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDisk: Error receiving the plan")
return
}
contextCreateMap := map[string]any{
"account_id": plan.AccountID.ValueInt64(),
"disk_name": plan.DiskName.ValueString(),
"size_max": plan.SizeMax.ValueInt64(),
"gid": plan.GID.ValueInt64(),
}
tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap)
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDisk: Error set timeout")
return
}
tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{
"account_id": plan.AccountID.ValueInt64(),
"disk_name": plan.DiskName.ValueString(),
"size_max": plan.SizeMax.ValueInt64(),
"gid": plan.GID.ValueInt64(),
"createTimeout": createTimeout})
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
// Check if input values are valid in the platform
tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap)
resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDisk: Error input checks")
return
}
tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap)
// Make create request and get response
createReq := utilities.CreateRequestResourceDisk(ctx, &plan)
tflog.Info(ctx, "Create resourceDisk: before call CloudBroker().Disks().Create", map[string]any{"req": createReq})
diskId, err := r.client.CloudBroker().Disks().Create(ctx, createReq)
if err != nil {
resp.Diagnostics.AddError(
"Create resourceDisk: unable to Create Disk",
err.Error(),
)
return
}
plan.Id = types.StringValue(strconv.Itoa(int(diskId)))
tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()})
// additional settings after disk creation: in case of failures, warnings are added to resp.Diagnostics,
// because additional settings failure is not critical. If errors were added instead of warnings, terraform
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
if !plan.NodeIDs.IsUnknown() {
resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, diskId, nil, &plan, true, r.client)...)
}
if !plan.IOTune.IsUnknown() {
resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...)
}
if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true
resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...)
}
tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceDiskModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDisk: Error get state")
return
}
tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDisk: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{
"disk_id": state.Id.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// read status
resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDisk: Error reading disk status")
return
}
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDisk: Error flatten disk")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDisk: Error set state")
return
}
tflog.Info(ctx, "End read resourceDisk")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceDiskModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error receiving the plan")
return
}
tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()})
// Retrieve values from state
var state models.ResourceDiskModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error set timeout")
return
}
tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{
"disk_id": state.Id.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
// Checking if inputs are valid
tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()})
resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error input checks")
return
}
tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()})
diskId, err := strconv.Atoi(state.Id.ValueString())
if err != nil {
resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error())
return
}
// resize disk
if !plan.SizeMax.Equal(state.SizeMax) {
resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error resizing disk")
return
}
}
// rename disk
if !plan.DiskName.Equal(state.DiskName) {
resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error renaming disk")
return
}
}
// change io limits
if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() {
resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk")
return
}
}
// share/unshare disk
if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() {
resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare")
return
}
}
// update nodeIDs
if !plan.NodeIDs.Equal(state.NodeIDs) && !plan.NodeIDs.IsUnknown() {
resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, uint64(diskId), &state, &plan, false, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskUpdate: Error with update nodeIDs")
return
}
}
tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceDiskModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDisk: Error get state")
return
}
tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDisk: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{
"disk_id": state.Id.ValueString(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
	// detach and permanently default to true unless explicitly set to false in state
	detach := true
	permanently := true
	if !state.Detach.IsNull() && !state.Detach.IsUnknown() {
		detach = state.Detach.ValueBool()
	}
	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() {
		permanently = state.Permanently.ValueBool()
	}
	// Delete existing disk
delReq := disks.DeleteRequest{
DiskID: uint64(state.DiskId.ValueInt64()),
Detach: detach, // default true
Permanently: permanently, // default true
}
tflog.Info(ctx, "Delete resourceDisk: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": state.Id.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceDisk(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk"
}
// Configure adds the provider configured client to the resource.
func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceDisk")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceDisk successfully")
}
func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
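// Illustrative sketch (assumption, not part of this commit): these constructors
// are expected to be registered in the provider's Resources list alongside the
// other cloudbroker resources, roughly:
//
//	func (p *DynamixProvider) Resources(_ context.Context) []func() resource.Resource {
//		return []func() resource.Resource{
//			cbDisks.NewResourceDisk,
//			cbDisks.NewResourceDiskReplications,
//			cbDisks.NewResourceDiskSnapshot,
//			// ...
//		}
//	}
//
// DynamixProvider is a hypothetical name for the provider type.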

View File

@@ -0,0 +1,333 @@
package cbDisks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceDiskReplication{}
_ resource.ResourceWithImportState = &resourceDiskReplication{}
)
// NewResourceDiskReplications is a helper function to simplify the provider implementation.
func NewResourceDiskReplications() resource.Resource {
return &resourceDiskReplication{}
}
// resourceDiskReplication is the resource implementation.
type resourceDiskReplication struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk replication
var plan models.ResourceRecordDiskReplicationModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan")
return
}
tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully")
tflog.Info(ctx, "Create resourceDiskReplication: start creating")
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskReplication: Error set timeout")
return
}
tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully")
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
// Check if input values are valid in the platform
tflog.Info(ctx, "Create resourceDiskReplication: starting input checks")
resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskReplication: Error input checks")
return
}
tflog.Info(ctx, "Create resourceDiskReplication: input checks successful")
reqCreate := disks.ReplicateRequest{
DiskID: uint64(plan.DiskId.ValueInt64()),
Name: plan.Name.ValueString(),
SepID: uint64(plan.SepID.ValueInt64()),
PoolName: plan.PoolName.ValueString(),
}
diskReplicaId, err := r.client.CloudBroker().Disks().Replicate(ctx, reqCreate)
if err != nil {
resp.Diagnostics.AddError(
"Create resourceDiskReplication: unable to replicate disk",
err.Error(),
)
return
}
tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64()))
start := plan.Start.ValueBool()
ok := !(plan.Start.IsNull() || plan.Start.IsUnknown())
if ok && !start {
tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId))
reqStop := disks.ReplicationStopRequest{
DiskID: uint64(plan.DiskId.ValueInt64()),
}
_, err = r.client.CloudBroker().Disks().ReplicationStop(ctx, reqStop)
if err != nil {
resp.Diagnostics.AddError(
fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, stoped", uint64(plan.DiskId.ValueInt64()), diskReplicaId),
err.Error(),
)
return
}
}
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceRecordDiskReplicationModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error get state")
return
}
tflog.Info(ctx, "Read resourceDiskReplication: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{
"disk_id": state.DiskId.ValueInt64(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// read status
resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status")
return
}
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error set state")
return
}
tflog.Info(ctx, "End read resourceDiskReplication")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceRecordDiskReplicationModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan")
return
}
tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
// Retrieve values from state
var state models.ResourceRecordDiskReplicationModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: Error set timeout")
return
}
tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{
"disk_id": state.DiskId.ValueInt64(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
// Checking if inputs are valid
tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: Error input checks")
return
}
tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()})
if !plan.Start.Equal(state.Start) {
resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop")
return
}
}
if !plan.Pause.Equal(state.Pause) {
resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause")
return
}
}
if !plan.Reverse.Equal(state.Reverse) {
resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse")
return
}
}
tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskReplication: Error set state")
return
}
tflog.Info(ctx, "End read resourceDiskReplication")
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceRecordDiskReplicationModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDiskReplication: Error get state")
return
}
tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDiskReplication: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{
"disk_id": state.DiskId.ValueInt64(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
	// detach and permanently default to true unless explicitly set to false in state
	detach := true
	permanently := true
	if !state.Detach.IsNull() && !state.Detach.IsUnknown() {
		detach = state.Detach.ValueBool()
	}
	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() {
		permanently = state.Permanently.ValueBool()
	}
	// Delete existing disk replication
delReq := disks.DeleteRequest{
DiskID: uint64(state.DiskId.ValueInt64()),
Detach: detach, // default true
Permanently: permanently, // default true
}
tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceDiskReplication: Error deleting disk with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()})
}
// Schema defines the schema for the resource.
func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceDiskReplication(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
}
// Configure adds the provider configured client to the resource.
func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceDiskReplication")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceDiskReplication successfully")
}
func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,295 @@
package cbDisks
import (
"context"
"time"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceDiskSnapshot{}
_ resource.ResourceWithImportState = &resourceDiskSnapshot{}
)
// NewResourceDiskSnapshot is a helper function to simplify the provider implementation.
func NewResourceDiskSnapshot() resource.Resource {
return &resourceDiskSnapshot{}
}
// resourceDiskSnapshot is the resource implementation.
type resourceDiskSnapshot struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceDiskSnapshot) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk snapshot
var plan models.ResourceDiskSnapshotModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskSnapshot: Error receiving the plan")
return
}
ctxCreateSnpsht := map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
}
tflog.Info(ctx, "Create resourceDiskSnapshot: got plan successfully", ctxCreateSnpsht)
tflog.Info(ctx, "Create resourceDiskSnapshot: start creating", ctxCreateSnpsht)
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskSnapshot: Error set timeout")
return
}
tflog.Info(ctx, "Create resourceDiskSnapshot: set timeouts successfully", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
"createTimeout": createTimeout})
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
// Check if input values are valid in the platform
_, diags = utilities.DiskSnapshotCheckPresence(ctx, &plan, r.client)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskSnapshot: disk snapshot does not exist")
return
}
tflog.Info(ctx, "Create resourceDiskSnapshot: snapshot successfully loaded", ctxCreateSnpsht)
if plan.Rollback.ValueBool() { // default is false
resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceDiskSnapshot: Error rollback snapshot")
return
}
}
tflog.Info(ctx, "Create resourceDiskSnapshot: resource creation is completed", ctxCreateSnpsht)
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskSnapshot) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceDiskSnapshotModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskSnapshot: Error get state")
return
}
tflog.Info(ctx, "Read resourceDiskSnapshot: got state successfully", map[string]any{
"disk_id": state.DiskID.ValueInt64(),
"label": state.Label.ValueString(),
})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskSnapshot: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceDiskSnapshot: set timeouts successfully", map[string]any{
"disk_id": state.DiskID.ValueInt64(),
"label": state.Label.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskSnapshot: Error flatten disk snapshot")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceDiskSnapshot: Error set state")
return
}
tflog.Info(ctx, "End read resourceDiskSnapshot")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskSnapshot) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceDiskSnapshotModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the plan")
return
}
ctxSnpsht := map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
}
tflog.Info(ctx, "Update resourceDiskSnapshot: got plan successfully", ctxSnpsht)
// Retrieve values from state
var state models.ResourceDiskSnapshotModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceDiskSnapshot: got state successfully", ctxSnpsht)
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskSnapshot: Error set timeout")
return
}
tflog.Info(ctx, "Update resourceDiskSnapshot: set timeouts successfully", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
if !plan.Rollback.Equal(state.Rollback) && plan.Rollback.ValueBool() {
resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskSnapshot: Error rollback snapshot")
return
}
}
tflog.Info(ctx, "Update resourceDiskSnapshot: disk snapshot update is completed", ctxSnpsht)
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskSnapshot) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceDiskSnapshotModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDiskSnapshot: Error get state")
return
}
tflog.Info(ctx, "Delete resourceDiskSnapshot: got state successfully", map[string]any{
"disk_id": state.DiskID.ValueInt64(),
"label": state.Label.ValueString()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDiskSnapshot: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceDiskSnapshot: set timeouts successfully", map[string]any{
"disk_id": state.DiskID.ValueInt64(),
"label": state.Label.ValueString(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
// Check if input values are valid in the platform
_, diags = utilities.DiskSnapshotCheckPresence(ctx, &state, r.client)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceDiskSnapshot: disk snapshot does not exist")
return
}
delReq := disks.SnapshotDeleteRequest{
DiskID: uint64(state.DiskID.ValueInt64()),
Label: state.Label.ValueString(),
}
tflog.Info(ctx, "Delete resourceDiskSnapshot: before call CloudBroker().Disks().SnapshotDelete", map[string]any{"req": delReq})
_, err := r.client.CloudBroker().Disks().SnapshotDelete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceDiskSnapshot: Error deleting disk with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete resourceDiskSnapshot", map[string]any{
"disk_id": state.Id.ValueString(),
"label": state.Label.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceDiskSnapshot) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceDiskSnapshot(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceDiskSnapshot) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot"
}
// Configure adds the provider configured client to the resource.
func (r *resourceDiskSnapshot) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceDiskSnapshot")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceDiskSnapshot successfully")
}
func (r *resourceDiskSnapshot) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,253 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDisk() map[string]schema.Attribute {
return map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Required: true,
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"machine_id": schema.Int64Attribute{
Computed: true,
},
"machine_name": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"replication": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"pool_id": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"self_volume_id": schema.StringAttribute{
Computed: true,
},
"storage_id": schema.StringAttribute{
Computed: true,
},
"volume_id": schema.StringAttribute{
Computed: true,
},
},
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
}
}
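// Illustrative sketch (assumption, not part of this commit): this attribute
// map is consumed by the data source's Schema method, mirroring the resource
// files in this commit (the receiver name is assumed):
//
//	func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
//		resp.Schema = schema.Schema{
//			Attributes: schemas.MakeSchemaDataSourceDisk(),
//			Blocks: map[string]schema.Block{
//				"timeouts": timeouts.Block(ctx),
//			},
//		}
//	}
//
// timeouts.Block(ctx) here is the datasource variant, which takes no Opts.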

View File

@@ -0,0 +1,293 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskList() map[string]schema.Attribute {
return map[string]schema.Attribute{
"by_id": schema.Int64Attribute{
Optional: true,
},
"name": schema.StringAttribute{
Optional: true,
},
"account_name": schema.StringAttribute{
Optional: true,
},
"disk_max_size": schema.Int64Attribute{
Optional: true,
},
"shared": schema.BoolAttribute{
Optional: true,
},
"account_id": schema.Int64Attribute{
Optional: true,
},
"type": schema.StringAttribute{
Optional: true,
},
"sort_by": schema.StringAttribute{
Optional: true,
},
"page": schema.Int64Attribute{
Optional: true,
},
"size": schema.Int64Attribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"machine_id": schema.Int64Attribute{
Computed: true,
},
"machine_name": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"replication": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"pool_id": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"self_volume_id": schema.StringAttribute{
Computed: true,
},
"storage_id": schema.StringAttribute{
Computed: true,
},
"volume_id": schema.StringAttribute{
Computed: true,
},
},
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,293 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskListDeleted() map[string]schema.Attribute {
return map[string]schema.Attribute{
"by_id": schema.Int64Attribute{
Optional: true,
},
"name": schema.StringAttribute{
Optional: true,
},
"account_name": schema.StringAttribute{
Optional: true,
},
"disk_max_size": schema.Int64Attribute{
Optional: true,
},
"shared": schema.BoolAttribute{
Optional: true,
},
"account_id": schema.Int64Attribute{
Optional: true,
},
"type": schema.StringAttribute{
Optional: true,
},
"sort_by": schema.StringAttribute{
Optional: true,
},
"page": schema.Int64Attribute{
Optional: true,
},
"size": schema.Int64Attribute{
Optional: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"machine_id": schema.Int64Attribute{
Computed: true,
},
"machine_name": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"replication": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"pool_id": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"self_volume_id": schema.StringAttribute{
Computed: true,
},
"storage_id": schema.StringAttribute{
Computed: true,
},
"volume_id": schema.StringAttribute{
Computed: true,
},
},
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,36 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskListTypes() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "Page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "Page size",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}
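// Usage sketch (an assumption, not part of this commit): a constructor like the
// one above is typically handed to the framework in the data source's Schema
// method. The dataSourceDiskListTypes receiver name is hypothetical; the calls
// are standard terraform-plugin-framework API (requires the
// "github.com/hashicorp/terraform-plugin-framework/datasource" import).
//
//	func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
//		resp.Schema = schema.Schema{
//			Attributes: MakeSchemaDataSourceDiskListTypes(),
//		}
//	}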


@@ -0,0 +1,62 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskListTypesDetailed() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "Page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "Page size",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"pools": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"name": schema.StringAttribute{
Computed: true,
},
"system": schema.StringAttribute{
Computed: true,
},
"types": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
},
},
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"sep_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,269 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskListUnattached() map[string]schema.Attribute {
return map[string]schema.Attribute{
// optional attributes
"by_id": schema.Int64Attribute{
Optional: true,
Description: "Find by ID",
},
"account_name": schema.StringAttribute{
Optional: true,
Description: "Find by account name",
},
"disk_max_size": schema.Int64Attribute{
Optional: true,
Description: "Find by max disk size",
},
"status": schema.StringAttribute{
Optional: true,
Description: "Find by status",
},
"account_id": schema.Int64Attribute{
Optional: true,
Description: "ID of the account the disks belong to",
},
"sep_id": schema.Int64Attribute{
Optional: true,
Description: "find by sep ID",
},
"pool_name": schema.StringAttribute{
Optional: true,
Description: "find by pool name",
},
"type": schema.StringAttribute{
Optional: true,
Description: "type of the disks",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "Page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "Page size",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"ckey": schema.StringAttribute{
Computed: true,
},
"meta": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,251 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceDiskReplication() map[string]schema.Attribute {
return map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Required: true,
Description: "Id of primary disk",
},
"status_replication": schema.StringAttribute{
Computed: true,
Description: "Status of replication",
},
"account_id": schema.Int64Attribute{
Computed: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"account_name": schema.StringAttribute{
Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"disk_name": schema.StringAttribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"replication": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"pool_id": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"self_volume_id": schema.StringAttribute{
Computed: true,
},
"storage_id": schema.StringAttribute{
Computed: true,
},
"volume_id": schema.StringAttribute{
Computed: true,
},
},
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_id": schema.Int64Attribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,39 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceDiskSnapshot() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"disk_id": schema.Int64Attribute{
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": schema.StringAttribute{
Required: true,
Description: "Name of the snapshot",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,48 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
func MakeSchemaDataSourceDiskSnapshotList() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"disk_id": schema.Int64Attribute{
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
}
}


@@ -0,0 +1,303 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaResourceDisk() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"account_id": schema.Int64Attribute{
Required: true,
Description: "ID of the account",
},
"disk_name": schema.StringAttribute{
Required: true,
Description: "Iname of disk",
},
"size_max": schema.Int64Attribute{
Required: true,
Description: "size in GB, default is 10",
},
"gid": schema.Int64Attribute{
Required: true,
Description: "ID of the grid (platform)",
},
"node_ids": schema.ListAttribute{
ElementType: types.Int64Type,
Optional: true,
Description: "ID of the grid (platform)",
},
"ssd_size": schema.Int64Attribute{
Optional: true,
Description: "size in GB, default is 10",
},
// optional attributes
"desc": schema.StringAttribute{
Optional: true,
Computed: true,
Description: "description of disk",
},
"pool": schema.StringAttribute{
Optional: true,
Computed: true,
Description: "Pool for disk location",
},
"sep_id": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"type": schema.StringAttribute{
Optional: true,
Computed: true,
Validators: []validator.String{
stringvalidator.OneOf("B", "D", "T"), // case is not ignored
},
Description: "(B;D;T) B=Boot;D=Data;T=Temp",
// default is D
},
"detach": schema.BoolAttribute{
Optional: true,
Description: "Detaching the disk from compute",
// default is false
},
"permanently": schema.BoolAttribute{
Optional: true,
Description: "Whether to completely delete the disk, works only with non attached disks",
// default is false
},
"shareable": schema.BoolAttribute{
Optional: true,
Computed: true,
Description: "share disk",
},
"iotune": schema.SingleNestedAttribute{
Optional: true,
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Number of bytes to read per second",
},
"read_bytes_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum number of bytes to read",
},
"read_iops_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Number of io read operations per second",
},
"read_iops_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum number of io read operations",
},
"size_iops_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Size of io operations",
},
"total_bytes_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Total size bytes per second",
},
"total_bytes_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum total size of bytes per second",
},
"total_iops_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Total number of io operations per second",
},
"total_iops_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum total number of io operations per second",
},
"write_bytes_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Number of bytes to write per second",
},
"write_bytes_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum number of bytes to write per second",
},
"write_iops_sec": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Number of write operations per second",
},
"write_iops_sec_max": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Maximum number of write operations per second",
},
},
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"last_updated": schema.StringAttribute{
Computed: true,
Description: "Timestamp of the last Terraform update of the disk resource.",
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
}
}
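// Usage sketch (an assumption, not part of this commit): the resource-side
// counterpart wiring uses the resource package's request/response types. The
// resourceDisk receiver name is hypothetical (requires the
// "github.com/hashicorp/terraform-plugin-framework/resource" import).
//
//	func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
//		resp.Schema = schema.Schema{
//			Attributes: MakeSchemaResourceDisk(),
//		}
//	}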


@@ -0,0 +1,281 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaResourceDiskReplication() map[string]schema.Attribute {
// required attributes
return map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Required: true,
},
"disk_name": schema.StringAttribute{
Required: true,
},
"sep_id": schema.Int64Attribute{
Required: true,
},
"pool_name": schema.StringAttribute{
Required: true,
},
// optional attributes
"pause": schema.BoolAttribute{
Optional: true,
},
"reverse": schema.BoolAttribute{
Optional: true,
},
"start": schema.BoolAttribute{
Optional: true,
},
"detach": schema.BoolAttribute{
Optional: true,
},
"permanently": schema.BoolAttribute{
Optional: true,
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
},
"replica_disk_id": schema.Int64Attribute{
Computed: true,
},
"status_replication": schema.StringAttribute{
Computed: true,
},
"account_id": schema.Int64Attribute{
Computed: true,
},
"account_name": schema.StringAttribute{
Computed: true,
},
"acl": schema.StringAttribute{
Computed: true,
},
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"computes": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"compute_id": schema.StringAttribute{
Computed: true,
},
"compute_name": schema.StringAttribute{
Computed: true,
},
},
},
},
"created_time": schema.Int64Attribute{
Computed: true,
},
"deleted_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"destruction_time": schema.Int64Attribute{
Computed: true,
},
"devicename": schema.StringAttribute{
Computed: true,
},
"disk_path": schema.StringAttribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"image_id": schema.Int64Attribute{
Computed: true,
},
"images": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"iotune": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"read_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"read_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec": schema.Int64Attribute{
Computed: true,
},
"read_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"size_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"total_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec": schema.Int64Attribute{
Computed: true,
},
"total_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec": schema.Int64Attribute{
Computed: true,
},
"write_bytes_sec_max": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec": schema.Int64Attribute{
Computed: true,
},
"write_iops_sec_max": schema.Int64Attribute{
Computed: true,
},
},
},
"iqn": schema.StringAttribute{
Computed: true,
},
"login": schema.StringAttribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"order": schema.Int64Attribute{
Computed: true,
},
"params": schema.StringAttribute{
Computed: true,
},
"parent_id": schema.Int64Attribute{
Computed: true,
},
"passwd": schema.StringAttribute{
Computed: true,
},
"pci_slot": schema.Int64Attribute{
Computed: true,
},
"pool": schema.StringAttribute{
Computed: true,
},
"present_to": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"purge_time": schema.Int64Attribute{
Computed: true,
},
"purge_attempts": schema.Int64Attribute{
Computed: true,
},
"replication": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"disk_id": schema.Int64Attribute{
Computed: true,
},
"pool_id": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"self_volume_id": schema.StringAttribute{
Computed: true,
},
"storage_id": schema.StringAttribute{
Computed: true,
},
"volume_id": schema.StringAttribute{
Computed: true,
},
},
},
"reality_device_number": schema.Int64Attribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"reference_id": schema.StringAttribute{
Computed: true,
},
"res_name": schema.StringAttribute{
Computed: true,
},
"role": schema.StringAttribute{
Computed: true,
},
"sep_type": schema.StringAttribute{
Computed: true,
},
"shareable": schema.BoolAttribute{
Computed: true,
},
"size_max": schema.Int64Attribute{
Computed: true,
},
"size_used": schema.Float64Attribute{
Computed: true,
},
"snapshots": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"label": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
"timestamp": schema.Int64Attribute{
Computed: true,
},
},
},
},
"status": schema.StringAttribute{
Computed: true,
},
"tech_status": schema.StringAttribute{
Computed: true,
},
"type": schema.StringAttribute{
Computed: true,
},
"vmid": schema.Int64Attribute{
Computed: true,
},
}
}


@@ -0,0 +1,58 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)
func MakeSchemaResourceDiskSnapshot() map[string]schema.Attribute {
return map[string]schema.Attribute{
// required attributes
"disk_id": schema.Int64Attribute{
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": schema.StringAttribute{
Required: true,
Description: "Name of the snapshot",
},
// optional attributes
"rollback": schema.BoolAttribute{
Optional: true,
Description: "Needed in order to make a snapshot rollback",
// default is false
},
"timestamp": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Snapshot time",
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
},
// id is generated as "<disk_id>#<label>"
},
"last_updated": schema.StringAttribute{
Computed: true,
Description: "Timestamp of the last Terraform update of the disk resource.",
},
"guid": schema.StringAttribute{
Computed: true,
},
"res_id": schema.StringAttribute{
Computed: true,
},
"snap_set_guid": schema.StringAttribute{
Computed: true,
},
"snap_set_time": schema.Int64Attribute{
Computed: true,
},
}
}
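// Sketch (an assumption, not part of this commit): because the resource ID is
// composed as "<disk_id>#<label>" (see the comment on "id" above), reading the
// resource back requires a parse step along these lines:
//
//	parts := strings.SplitN(state.Id.ValueString(), "#", 2)
//	diskId, err := strconv.ParseUint(parts[0], 10, 64) // numeric disk ID
//	label := parts[1]                                  // snapshot label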


@@ -0,0 +1,30 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskCheckPresence(ctx context.Context, state *models.DiskModel, c *decort.DecortClient) (*disks.RecordDisk, diag.Diagnostics) {
diags := diag.Diagnostics{}
listReq := disks.GetRequest{DiskID: uint64(state.DiskID.ValueInt64())}
tflog.Info(ctx, "DataSourceDiskCheckPresence: before call CloudBroker().Disks().Get", map[string]any{
"req": listReq,
})
disk, err := c.CloudBroker().Disks().Get(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about disk", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskCheckPresence: got successfully")
return disk, nil
}
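// Call-site sketch (an assumption, not part of this commit) inside a data
// source Read method, after the config has been loaded into state:
//
//	disk, diags := utilities.DataSourceDiskCheckPresence(ctx, &state, d.client)
//	resp.Diagnostics.Append(diags...)
//	if resp.Diagnostics.HasError() {
//		return
//	}
//	// disk is then flattened into state and written back via resp.State.Set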


@@ -0,0 +1,63 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskListCheckPresence(ctx context.Context, state *models.DisksModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
diags := diag.Diagnostics{}
listReq := disks.ListRequest{}
if !state.ByID.IsNull() {
listReq.ByID = uint64(state.ByID.ValueInt64())
}
if !state.Name.IsNull() {
listReq.Name = state.Name.ValueString()
}
if !state.AccountName.IsNull() {
listReq.AccountName = state.AccountName.ValueString()
}
if !state.DiskMaxSize.IsNull() {
listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
}
if !state.Shared.IsNull() {
listReq.Shared = state.Shared.ValueBool()
}
if !state.AccountID.IsNull() {
listReq.AccountID = uint64(state.AccountID.ValueInt64())
}
if !state.Type.IsNull() {
listReq.Type = state.Type.ValueString()
}
if !state.SortBy.IsNull() {
listReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListCheckPresence: before call CloudBroker().Disks().List", map[string]any{
"req": listReq,
})
diskList, err := c.CloudBroker().Disks().List(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about disk list", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListCheckPresence: got list successfully", map[string]any{
"entry_count": diskList.EntryCount,
})
return diskList, nil
}
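// The null-check-then-cast pattern above is repeated in every list helper of
// this package. A possible generalization for the uint64 request fields (an
// assumption, not in the source; needs the
// "github.com/hashicorp/terraform-plugin-framework/types" import):
//
//	func uint64OrZero(v types.Int64) uint64 {
//		if v.IsNull() {
//			return 0
//		}
//		return uint64(v.ValueInt64())
//	}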


@@ -0,0 +1,63 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskListDeletedCheckPresence(ctx context.Context, state *models.ListDisksModel, c *decort.DecortClient) (*disks.ListDisks, diag.Diagnostics) {
diags := diag.Diagnostics{}
listReq := disks.ListDeletedRequest{}
if !state.ByID.IsNull() {
listReq.ByID = uint64(state.ByID.ValueInt64())
}
if !state.Name.IsNull() {
listReq.Name = state.Name.ValueString()
}
if !state.AccountName.IsNull() {
listReq.AccountName = state.AccountName.ValueString()
}
if !state.DiskMaxSize.IsNull() {
listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
}
if !state.Shared.IsNull() {
listReq.Shared = state.Shared.ValueBool()
}
if !state.AccountID.IsNull() {
listReq.AccountID = uint64(state.AccountID.ValueInt64())
}
if !state.Type.IsNull() {
listReq.Type = state.Type.ValueString()
}
if !state.SortBy.IsNull() {
listReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: before call CloudBroker().Disks().ListDeleted", map[string]any{
"req": listReq,
})
diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about disk list", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListDeletedCheckPresence: got list successfully", map[string]any{
"entry_count": diskList.EntryCount,
})
return diskList, nil
}


@@ -0,0 +1,42 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskListTypesCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
diags := diag.Diagnostics{}
listTypesReq := disks.ListTypesRequest{Detailed: false}
if !state.SortBy.IsNull() {
listTypesReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listTypesReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listTypesReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: before call CloudBroker().Disks().ListTypes", map[string]any{
"req": listTypesReq,
})
listTypes, err := c.CloudBroker().Disks().ListTypes(ctx, listTypesReq)
if err != nil {
diags.AddError("Cannot get info about disk list types", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListTypesCheckPresence: got list successfully", map[string]any{
"entry_count": listTypes.EntryCount,
})
return listTypes, nil
}


@@ -0,0 +1,42 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskListTypesDetailedCheckPresence(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) (*disks.ListTypes, diag.Diagnostics) {
diags := diag.Diagnostics{}
listTypesReq := disks.ListTypesRequest{Detailed: true}
if !state.SortBy.IsNull() {
listTypesReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listTypesReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listTypesReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: before call CloudBroker().Disks().ListTypes", map[string]any{
"req": listTypesReq,
})
listTypes, err := c.CloudBroker().Disks().ListTypes(ctx, listTypesReq)
if err != nil {
diags.AddError("Cannot get info about disk list types", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListTypesDetailedCheckPresence: got list successfully", map[string]any{
"entry_count": listTypes.EntryCount,
})
return listTypes, nil
}


@@ -0,0 +1,66 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskListUnattachedCheckPresence(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) (*disks.ListUnattachedDisks, diag.Diagnostics) {
diags := diag.Diagnostics{}
listReq := disks.ListUnattachedRequest{}
if !state.ByID.IsNull() {
listReq.ByID = uint64(state.ByID.ValueInt64())
}
if !state.AccountName.IsNull() {
listReq.AccountName = state.AccountName.ValueString()
}
if !state.DiskMaxSize.IsNull() {
listReq.DiskMaxSize = state.DiskMaxSize.ValueInt64()
}
if !state.Status.IsNull() {
listReq.Status = state.Status.ValueString()
}
if !state.AccountID.IsNull() {
listReq.AccountID = uint64(state.AccountID.ValueInt64())
}
if !state.SepID.IsNull() {
listReq.SEPID = uint64(state.SepID.ValueInt64())
}
if !state.PoolName.IsNull() {
listReq.Pool = state.PoolName.ValueString()
}
if !state.Type.IsNull() {
listReq.Type = state.Type.ValueString()
}
if !state.SortBy.IsNull() {
listReq.SortBy = state.SortBy.ValueString()
}
if !state.Page.IsNull() {
listReq.Page = uint64(state.Page.ValueInt64())
}
if !state.Size.IsNull() {
listReq.Size = uint64(state.Size.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: before call CloudBroker().Disks().ListUnattached", map[string]any{
"req": listReq,
})
diskList, err := c.CloudBroker().Disks().ListUnattached(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about disk list", err.Error())
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskListUnattachedCheckPresence: got list successfully", map[string]any{
"entry_count": diskList.EntryCount,
})
return diskList, nil
}


@@ -0,0 +1,32 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskReplicationCheckPresence(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
status, err := c.CloudBroker().Disks().ReplicationStatus(ctx, disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())})
if err != nil {
return nil, nil, err
}
req := disks.GetRequest{}
if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
req.DiskID = uint64(state.DiskId.ValueInt64())
}
tflog.Info(ctx, "DataSourceDiskReplicationCheckPresence: load disk")
disk, err := c.CloudBroker().Disks().Get(ctx, req)
if err != nil {
return nil, nil, err
}
return disk, &status, nil
}


@@ -0,0 +1,43 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func DataSourceDiskSnapshotCheckPresence(ctx context.Context, plan *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
diags := diag.Diagnostics{}
diskId := uint64(plan.DiskID.ValueInt64())
label := plan.Label.ValueString()
tflog.Info(ctx, "Start DataSourceDiskSnapshotCheckPresence", map[string]any{"disk_id": diskId, "label": label})
tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: before call CloudBroker().Disks().Get", map[string]any{"disk_id": diskId})
disk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
if err != nil {
diags.AddError(
fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
err.Error(),
)
return nil, diags
}
tflog.Info(ctx, "DataSourceDiskSnapshotCheckPresence: response from CloudBroker().Disks().Get", map[string]any{"response": disk})
for _, sn := range disk.Snapshots {
if label == sn.Label {
return &sn, nil
}
}
diags.AddError(
"Snapshot not found",
fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
)
return nil, diags
}


@@ -0,0 +1,25 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
)
func DiskSnapshotListCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.ListSnapshots, error) {
tflog.Info(ctx, fmt.Sprintf("DiskSnapshotListCheckPresence: Get info about disk snapshot list with disk ID - %v", diskId))
recordDisk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
if err != nil {
return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
}
tflog.Info(ctx, "DiskSnapshotListCheckPresence: response from CloudBroker().Disks().Get", map[string]any{
"disk_id": diskId,
"response": recordDisk})
return &recordDisk.Snapshots, err
}


@@ -0,0 +1,456 @@
package utilities
import (
"context"
"fmt"
"strconv"
"time"
"github.com/hashicorp/terraform-plugin-framework/types"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
// DiskCheckPresence checks if disk with diskId exists
func DiskCheckPresence(ctx context.Context, diskId uint64, c *decort.DecortClient) (*disks.RecordDisk, error) {
tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskId))
diskRecord, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
if err != nil {
return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
}
tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudBroker().Disks().Get", map[string]any{"disk_id": diskId, "response": diskRecord})
	return diskRecord, nil
}
// CreateRequestResourceDisk generates disk create request from plan
func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskModel) disks.CreateRequest {
tflog.Info(ctx, "Start CreateRequestResourceDisk", map[string]any{
"account_id": plan.AccountID.ValueInt64(),
"disk_name": plan.DiskName.ValueString(),
"size_max": plan.SizeMax.ValueInt64(),
"gid": plan.GID.ValueInt64(),
})
// set up required parameters in disk create request
createReq := disks.CreateRequest{
AccountID: uint64(plan.AccountID.ValueInt64()),
Name: plan.DiskName.ValueString(),
Size: uint64(plan.SizeMax.ValueInt64()),
GID: uint64(plan.GID.ValueInt64()),
}
if plan.Type.IsUnknown() {
createReq.Type = "D" // default value
} else {
createReq.Type = plan.Type.ValueString()
}
if !plan.SEPID.IsUnknown() {
createReq.SEPID = uint64(plan.SEPID.ValueInt64())
}
if !plan.Pool.IsUnknown() {
createReq.Pool = plan.Pool.ValueString()
}
if !plan.Description.IsUnknown() {
createReq.Description = plan.Description.ValueString()
}
if !plan.SSDSize.IsUnknown() {
createReq.SSDSize = uint64(plan.SSDSize.ValueInt64())
}
return createReq
}
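// Submission sketch (an assumption: the decort SDK is presumed to expose a
// Disks().Create call alongside the Get/Resize2/Restore calls used in this
// package):
//
//	createReq := CreateRequestResourceDisk(ctx, plan)
//	diskId, err := c.CloudBroker().Disks().Create(ctx, createReq)
//	if err != nil {
//		diags.AddError("cannot create disk", err.Error())
//	}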
// LimitIOCreateDisk sets the IO limits that the user specified in the iotune field for the created resource.
// In case of failure returns warnings.
func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
DiskID: diskId,
}
var iotunePlan models.ResourceIOTuneModel
	// plan.IOTune is not null, as it was checked before the call
tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskId})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOCreateDisk: cannot populate iotune with plan.IOTune object element")
return diags
}
limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())
limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOCreateDisk: before calling CloudBroker().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"limitIOReq": limitIOReq})
res, err := c.CloudBroker().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
diags.AddWarning("LimitIOCreateDisk: Unable to limit io for Disk",
err.Error())
}
tflog.Info(ctx, "LimitIOCreateDisk: response from CloudBroker().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"response": res})
return diags
}
// ShareableCreateDisk shares disk.
// In case of failure returns warnings.
func ShareableCreateDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
tflog.Info(ctx, "ShareableCreateDisk: before calling CloudBroker().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
if err != nil {
diags.AddWarning("ShareableCreateDisk: Unable to share Disk",
err.Error())
}
tflog.Info(ctx, "ShareableCreateDisk: response from CloudBroker().Disks().Share", map[string]any{
"disk_id": diskId,
"response": res})
return diags
}
// DiskReadStatus loads the disk resource by its ID and checks its current status, attempting to restore the disk
// if it is in the Deleted status.
// In case of failure returns errors.
func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.Id.ValueString()})
diags := diag.Diagnostics{}
diskId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("DiskReadStatus: Cannot parse disk ID from state", err.Error())
return diags
}
recordDisk, err := DiskCheckPresence(ctx, diskId, c)
if err != nil {
diags.AddError("DiskReadStatus: Unable to Read Disk before status check", err.Error())
return diags
}
// check resource status
switch recordDisk.Status {
case status.Modeled:
diags.AddError(
"Disk is in status Modeled",
"please, contact support for more information",
)
return diags
case status.Deleted:
// attempt to restore disk
tflog.Info(ctx, "DiskReadStatus: disk with status.Deleted is being read, attempt to restore it", map[string]any{
"disk_id": recordDisk.ID,
"status": recordDisk.Status})
diags.Append(RestoreDisk(ctx, diskId, c)...)
if diags.HasError() {
tflog.Error(ctx, "DiskReadStatus: cannot restore disk")
return diags
}
tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskId})
state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
case status.Destroyed, status.Purged:
diags.AddError(
"DiskReadStatus: Disk is in status Destroyed or Purged",
fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskId),
)
return diags
}
return nil
}
// RestoreDisk performs a disk Restore request.
// Returns an error in case of failures.
func RestoreDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
restoreReq := disks.RestoreRequest{
DiskID: diskId,
}
tflog.Info(ctx, "RestoreDisk: before calling CloudBroker().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})
res, err := c.CloudBroker().Disks().Restore(ctx, restoreReq)
if err != nil {
diags.AddError(
"RestoreDisk: cannot restore disk",
err.Error(),
)
return diags
}
tflog.Info(ctx, "RestoreDisk: response from CloudBroker().Disks().Restore", map[string]any{"disk_id": diskId, "response": res})
return nil
}
// SizeMaxUpdateDisk resizes the disk.
// Returns an error in case of failures.
func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
var diags diag.Diagnostics
resizeReq := disks.ResizeRequest{
DiskID: diskId,
}
// check if resize request is valid
if plan.SizeMax.ValueInt64() < state.SizeMax.ValueInt64() {
diags.AddError(
"SizeMaxUpdateDisk: reducing disk size is not allowed",
fmt.Sprintf("disk with id %s has state size %d, plan size %d",
plan.Id.ValueString(),
state.SizeMax.ValueInt64(),
plan.SizeMax.ValueInt64()))
return diags
}
resizeReq.Size = uint64(plan.SizeMax.ValueInt64())
tflog.Info(ctx, "SizeMaxUpdateDisk: before calling CloudBroker().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"size_max_state": state.SizeMax.ValueInt64(),
"size_max_plan": plan.SizeMax.ValueInt64(),
"req": resizeReq,
})
res, err := c.CloudBroker().Disks().Resize2(ctx, resizeReq)
if err != nil {
diags.AddError("can not resize disk", err.Error())
return diags
}
tflog.Info(ctx, "SizeMaxUpdateDisk: response from CloudBroker().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"response": res})
return nil
}
// NameUpdateDisk renames the disk.
// Returns an error in case of failures.
func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
var diags diag.Diagnostics
renameReq := disks.RenameRequest{
DiskID: diskId,
Name: plan.DiskName.ValueString(),
}
tflog.Info(ctx, "NameUpdateDisk: before calling CloudBroker().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_name_plan": plan.DiskName.ValueString(),
"req": renameReq,
})
res, err := c.CloudBroker().Disks().Rename(ctx, renameReq)
if err != nil {
diags.AddError("NameUpdateDisk: can not rename disk", err.Error())
return diags
}
tflog.Info(ctx, "NameUpdateDisk: response from CloudBroker().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"response": res})
return nil
}
// LimitIOUpdateDisk changes the IO limits that the user specified in the iotune field for the updated resource.
// In case of failure returns errors.
func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
DiskID: diskId,
}
var iotunePlan models.ResourceIOTuneModel
// plan.IOTune is not null, as it was checked before the call
tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskId})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOUpdateDisk: cannot populate iotune with plan.IOTune object element")
return diags
}
limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())
limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOUpdateDisk: before calling CloudBroker().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"limitIOReq": limitIOReq})
res, err := c.CloudBroker().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
diags.AddError("LimitIOUpdateDisk: Unable to limit io for Disk",
err.Error())
return diags
}
tflog.Info(ctx, "LimitIOUpdateDisk: response from CloudBroker().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"response": res})
return nil
}
// ShareableUpdateDisk shares or unshares the disk.
// In case of failure returns errors.
func ShareableUpdateDisk(ctx context.Context, diskId uint64, share bool, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
// share
if share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudBroker().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to share Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudBroker().Disks().Share", map[string]any{
"disk_id": diskId,
"response": res})
}
// unshare
if !share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudBroker().Disks().Unshare", map[string]any{"disk_id": diskId})
res, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to unshare Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudBroker().Disks().Unshare", map[string]any{
"disk_id": diskId,
"response": res})
}
return nil
}
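// ResourceDiskChangeNodes presents the disk to nodes added in the plan and, on update, depresents it
// from nodes removed relative to the state.
// In case of failure returns warnings.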
func ResourceDiskChangeNodes(ctx context.Context, diskID uint64, state, plan *models.ResourceDiskModel, afterCreate bool, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
presentIDs := make([]uint64, 0)
newNodes := make([]uint64, 0, len(plan.NodeIDs.Elements()))
diagsI := plan.NodeIDs.ElementsAs(ctx, &newNodes, true)
if diagsI.HasError() {
tflog.Error(ctx, "resourceDiskChangeNodes: cannot populate newNodeIDs with plan.NodeIDs list elements")
diags.AddWarning("resourceDiskChangeNodes: cannot populate newNodeIDs with plan.NodeIDs list elements",
fmt.Sprintf("%v", diagsI))
return diags
}
oldNodes := make([]uint64, 0)
if afterCreate {
presentIDs = newNodes
} else {
diagsI = state.NodeIDs.ElementsAs(ctx, &oldNodes, true)
if diagsI.HasError() {
tflog.Error(ctx, "resourceDiskChangeNodes: cannot populate oldNodes with state.NodeIDs list elements")
diags.AddWarning("resourceDiskChangeNodes: cannot populate oldNodes with state.NodeIDs list elements",
fmt.Sprintf("%v", diagsI))
return diags
}
presentIDs = difference(newNodes, oldNodes)
}
for _, presentID := range presentIDs {
req := disks.PresentRequest{
DiskID: diskID,
NodeID: presentID,
}
_, err := c.CloudBroker().Disks().Present(ctx, req)
if err != nil {
diags.AddWarning("resourceDiskChangeNodes: Unable to present presents disk to node",
err.Error())
}
}
if afterCreate {
return diags
}
depresentIDs := difference(oldNodes, newNodes)
if len(depresentIDs) > 0 {
for _, nodeID := range depresentIDs {
req := disks.DepresentRequest{
DiskID: diskID,
NodeID: nodeID,
}
_, err := c.CloudBroker().Disks().Depresent(ctx, req)
if err != nil {
diags.AddWarning("resourceDiskChangeNodes: Unable to depresent depresents disk from node",
err.Error())
}
}
}
return diags
}
// difference returns the elements of set that are not present in check.
func difference(set, check []uint64) []uint64 {
mapCheck := make(map[uint64]struct{})
for _, id := range check {
mapCheck[id] = struct{}{}
}
var diff []uint64
for _, id := range set {
if _, ok := mapCheck[id]; !ok {
diff = append(diff, id)
}
}
return diff
}
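// Worked example for difference (hypothetical node IDs): with plan nodes [1 2 3] and state nodes
// [2 3 4], ResourceDiskChangeNodes presents to difference(new, old) and depresents from
// difference(old, new):
//
//	newNodes := []uint64{1, 2, 3}
//	oldNodes := []uint64{2, 3, 4}
//	toPresent := difference(newNodes, oldNodes)   // [1]
//	toDepresent := difference(oldNodes, newNodes) // [4]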

View File

@@ -0,0 +1,172 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
func UtilityDiskReplicationUpdateStartStop(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
diskId := uint64(state.DiskId.ValueInt64())
targetDiskId := uint64(state.ReplicationId.ValueInt64())
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start update for disk replica with ID: %d", diskId))
ok := !(state.Start.IsNull() || state.Start.IsUnknown())
start := state.Start.ValueBool()
if ok && start {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
req := disks.ReplicationStartRequest{
DiskID: diskId,
TargetDiskID: targetDiskId,
}
_, err := c.CloudBroker().Disks().ReplicationStart(ctx, req)
if err != nil {
diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to start replicate disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
}
if ok && !start {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
req := disks.ReplicationStopRequest{
DiskID: targetDiskId,
}
_, err := c.CloudBroker().Disks().ReplicationStop(ctx, req)
if err != nil {
diags.AddError("UtilityDiskReplicationUpdateStartStop: Unable to stop replicate disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdateStartStop: complete update for disk replica with ID: %d", diskId))
return nil
}
func UtilityDiskReplicationUpdatePause(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
diskId := uint64(state.DiskId.ValueInt64())
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: start update for disk replica with ID: %d", diskId))
pause := state.Pause.ValueBool()
ok := !(state.Pause.IsNull() || state.Pause.IsUnknown())
if ok && pause {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d", diskId))
req := disks.ReplicationSuspendRequest{
DiskID: diskId,
}
_, err := c.CloudBroker().Disks().ReplicationSuspend(ctx, req)
if err != nil {
diags.AddError("utilityDiskReplicationUpdatePause: Unable to pause disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d, complete", diskId))
}
if ok && !pause {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d", diskId))
req := disks.ReplicationResumeRequest{
DiskID: diskId,
}
_, err := c.CloudBroker().Disks().ReplicationResume(ctx, req)
if err != nil {
diags.AddError("utilityDiskReplicationUpdatePause: Unable to resume disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d, complete", diskId))
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicationUpdatePause: complete update for disk replica with ID: %d", diskId))
return nil
}
func UtilityDiskReplicationUpdateReverse(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
diskId := uint64(state.DiskId.ValueInt64())
targetDiskId := uint64(state.ReplicationId.ValueInt64())
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: start update for disk replica with ID: %d", diskId))
reverse := state.Reverse.ValueBool()
ok := !(state.Reverse.IsNull() || state.Reverse.IsUnknown())
if ok && reverse {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId))
req := disks.ReplicationReverseRequest{
DiskID: diskId,
}
_, err := c.CloudBroker().Disks().ReplicationReverse(ctx, req)
if err != nil {
diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId))
}
if ok && !reverse {
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId))
req := disks.ReplicationReverseRequest{
DiskID: targetDiskId,
}
_, err := c.CloudBroker().Disks().ReplicationReverse(ctx, req)
if err != nil {
diags.AddError("utilityDiskReplicationUpdateReverse: Unable to reverse disk", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId))
}
tflog.Info(ctx, fmt.Sprintf("utilityDiskReplicaUpdateReverse: complete update for disk replica with ID: %d", diskId))
return nil
}
func ResourceDiskReplicationCheckPresence(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) (*disks.RecordDisk, *string, error) {
status, err := c.CloudBroker().Disks().ReplicationStatus(ctx, disks.ReplicationStatusRequest{DiskID: uint64(state.DiskId.ValueInt64())})
if err != nil {
return nil, nil, err
}
req := disks.GetRequest{}
if !state.DiskId.IsNull() && !state.DiskId.IsUnknown() {
req.DiskID = uint64(state.DiskId.ValueInt64())
} else {
req.DiskID = uint64(state.ReplicationId.ValueInt64())
}
tflog.Info(ctx, "ResourceDiskReplicationCheckPresence: load disk")
disk, err := c.CloudBroker().Disks().Get(ctx, req)
if err != nil {
return nil, nil, err
}
return disk, &status, nil
}
// ReplicationDiskReadStatus loads the disk resource by its ID and gets its current status.
// In case of failure returns errors.
func ReplicationDiskReadStatus(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ReplicationDiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.DiskId.ValueInt64()})
diags := diag.Diagnostics{}
_, _, err := ResourceDiskReplicationCheckPresence(ctx, state, c)
if err != nil {
diags.AddError("ReplicationDiskReadStatus: Unable to Read Disk before status check", err.Error())
return diags
}
return nil
}
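// Summary of how the replication flags map onto SDK calls (a sketch derived from the functions
// above; disk_id is the source disk, replication_id the replica):
//
//	start = true    -> Disks().ReplicationStart(DiskID: disk_id, TargetDiskID: replication_id)
//	start = false   -> Disks().ReplicationStop(DiskID: replication_id)
//	pause = true    -> Disks().ReplicationSuspend(DiskID: disk_id)
//	pause = false   -> Disks().ReplicationResume(DiskID: disk_id)
//	reverse = true  -> Disks().ReplicationReverse(DiskID: disk_id)
//	reverse = false -> Disks().ReplicationReverse(DiskID: replication_id)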

View File

@@ -0,0 +1,94 @@
package utilities
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
)
// DiskSnapshotCheckPresence checks if disk snapshot exists
func DiskSnapshotCheckPresence(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) (*disks.ItemSnapshot, diag.Diagnostics) {
diags := diag.Diagnostics{}
// take diskId and label from plan
diskId := uint64(plan.DiskID.ValueInt64())
label := plan.Label.ValueString()
// take diskId and label from Id for imported resource
if strings.Contains(plan.Id.ValueString(), "#") {
parts := strings.SplitN(plan.Id.ValueString(), "#", 2)
parsedDiskId, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
diags.AddError("Cannot parse disk ID from state", err.Error())
return nil, diags
}
diskId = parsedDiskId
label = parts[1]
}
tflog.Info(ctx, "Start DiskSnapshotCheckPresence", map[string]any{
"disk_id": diskId,
"label": label,
"id": plan.Id.ValueString(),
})
tflog.Info(ctx, "DiskSnapshotCheckPresence: before call CloudBroker().Disks().Get", map[string]any{"disk_id": diskId})
disk, err := c.CloudBroker().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
if err != nil {
diags.AddError(
fmt.Sprintf("Cannot get info about disk with disk_id %d", diskId),
err.Error(),
)
return nil, diags
}
tflog.Info(ctx, "DiskSnapshotCheckPresence: response from CloudBroker().Disks().Get", map[string]any{"response": disk})
for _, sn := range disk.Snapshots {
if label == sn.Label {
return &sn, nil
}
}
diags.AddError(
"Snapshot not found",
fmt.Sprintf("Snapshot with label %s for disk with disk_id %d not found", label, diskId),
)
return nil, diags
}
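// Import ID format handled above: "<disk_id>#<label>". A usage sketch (the resource address is
// hypothetical; only the ID shape is taken from the parsing logic):
//
//	terraform import dynamix_cb_disk_snapshot.example "123#my-snapshot"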
// RollbackDiskSnapshot rollbacks disk snapshot.
// Returns error in case of failures.
func RollbackDiskSnapshot(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
rollbackReq := disks.SnapshotRollbackRequest{
DiskID: uint64(plan.DiskID.ValueInt64()),
Label: plan.Label.ValueString(),
}
if !plan.TimeStamp.IsUnknown() {
rollbackReq.TimeStamp = uint64(plan.TimeStamp.ValueInt64())
}
tflog.Info(ctx, "RollbackDiskSnapshot: before calling CloudBroker().Disks().SnapshotRollback", map[string]any{"req": rollbackReq})
res, err := c.CloudBroker().Disks().SnapshotRollback(ctx, rollbackReq)
if err != nil {
diags.AddError(
"RollbackDiskSnapshot: Cannot rollback snapshot",
err.Error(),
)
return diags
}
tflog.Info(ctx, "RollbackDiskSnapshot: response from CloudBroker().Disks().SnapshotRollback", map[string]any{
"disk_id": plan.DiskID.ValueInt64(),
"label": plan.Label.ValueString(),
"response": res})
return nil
}

View File

@@ -303,3 +303,21 @@ func ExistImages(ctx context.Context, imageIDs []uint64, c *decort.DecortClient)
return nil
}
func ExistExtNetInRG(ctx context.Context, extNetId, accountId uint64, c *decort.DecortClient) error {
req := extnet.ListRequest{
AccountID: accountId,
ByID: extNetId,
}
listExtNet, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(listExtNet.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found for account with id %d", extNetId, accountId)
}
return nil
}
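// Usage sketch for ExistExtNetInRG (hypothetical IDs): validate that extnet 5 is available to
// account 10 before wiring it into a request:
//
//	if err := ExistExtNetInRG(ctx, 5, 10, c); err != nil {
//		diags.AddError("extnet check failed", err.Error())
//		return diags
//	}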

View File

@@ -0,0 +1,91 @@
package k8ci
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceK8CI{}
)
func NewDataSourceK8CI() datasource.DataSource {
return &dataSourceK8CI{}
}
// dataSourceK8CI is the data source implementation.
type dataSourceK8CI struct {
client *decort.DecortClient
}
func (d *dataSourceK8CI) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceK8CIModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CI: Error get state")
return
}
k8ciID := uint64(state.K8ciID.ValueInt64())
tflog.Info(ctx, "Read dataSourceK8CI: got state successfully", map[string]any{"k8ci_id": k8ciID})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CI: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceK8CI: set timeouts successfully", map[string]any{
"k8ci_id": k8ciID,
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.K8CIDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CI: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CI: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceK8CI", map[string]any{"k8ci_id": k8ciID})
}
func (d *dataSourceK8CI) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceK8CI(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceK8CI) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_k8ci"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceK8CI) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceK8CI")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceK8ci successfully")
}
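// Example Terraform configuration for this data source (a minimal sketch; the type name follows
// Metadata above and assumes the provider is registered under the name "dynamix"):
//
//	data "dynamix_cb_k8ci" "example" {
//	  k8ci_id = 123
//	}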

View File

@@ -0,0 +1,89 @@
package k8ci
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceK8CIList{}
)
func NewDataSourceK8CIList() datasource.DataSource {
return &dataSourceK8CIList{}
}
// dataSourceK8CIList is the data source implementation.
type dataSourceK8CIList struct {
client *decort.DecortClient
}
func (d *dataSourceK8CIList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceK8CIListModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIList: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceK8CIList: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIList: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceK8CIList: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.K8CIListDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIList: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIList: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceK8CIList")
}
func (d *dataSourceK8CIList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceK8CIList(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceK8CIList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_k8ci_list"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceK8CIList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceK8CIList")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceK8CIList successfully")
}
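// Example Terraform configuration for this data source (a minimal sketch; filter values are
// hypothetical, attribute names follow DataSourceK8CIListModel):
//
//	data "dynamix_cb_k8ci_list" "example" {
//	  status = "ENABLED"
//	  page   = 1
//	  size   = 10
//	}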

View File

@@ -0,0 +1,89 @@
package k8ci
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &dataSourceK8CIListDeleted{}
)
func NewDataSourceK8CIListDeleted() datasource.DataSource {
return &dataSourceK8CIListDeleted{}
}
// dataSourceK8CIListDeleted is the data source implementation.
type dataSourceK8CIListDeleted struct {
client *decort.DecortClient
}
func (d *dataSourceK8CIListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
// Read Terraform configuration data into the model
var state models.DataSourceK8CIListDeletedModel
resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIListDeleted: Error get state")
return
}
tflog.Info(ctx, "Read dataSourceK8CIListDeleted: got state successfully")
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIListDeleted: Error set timeout")
return
}
tflog.Info(ctx, "Read dataSourceK8CIListDeleted: set timeouts successfully", map[string]any{
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// Map response body to schema
resp.Diagnostics.Append(flattens.K8CIListDeletedDataSource(ctx, &state, d.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIListDeletedDeleted: Error flatten")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read dataSourceK8CIListDeleted: Error set state")
return
}
tflog.Info(ctx, "End read dataSourceK8CIListDeleted")
}
func (d *dataSourceK8CIListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaDataSourceK8CIListDeleted(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx),
},
}
}
func (d *dataSourceK8CIListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_k8ci_list_deleted"
}
// Configure adds the provider configured client to the data source.
func (d *dataSourceK8CIListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure dataSourceK8CIListDeleted")
d.client = client.DataSource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure dataSourceK8CIListDeleted successfully")
}
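// Example Terraform configuration for this data source (a minimal sketch; attribute names follow
// DataSourceK8CIListDeletedModel):
//
//	data "dynamix_cb_k8ci_list_deleted" "example" {
//	  page = 1
//	  size = 10
//	}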

View File

@@ -0,0 +1,59 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/utilities"
)
// K8CIDataSource flattens data source for K8CI.
// Returns an error in case the data source is not found on the platform.
// Flatten errors are added to tflog.
func K8CIDataSource(ctx context.Context, state *models.DataSourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.K8CIDataSource")
diags := diag.Diagnostics{}
k8ciID := uint64(state.K8ciID.ValueInt64())
recordK8ci, diags := utilities.K8ciDataSourceCheckPresence(ctx, k8ciID, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.K8CIDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceK8CIModel{
K8ciID: state.K8ciID,
Timeouts: state.Timeouts,
Description: types.StringValue(recordK8ci.Description),
GID: types.Int64Value(int64(recordK8ci.GID)),
GUID: types.Int64Value(int64(recordK8ci.GUID)),
Id: types.StringValue(id.String()),
LBImageID: types.Int64Value(int64(recordK8ci.LBImageID)),
MasterDriver: types.StringValue(recordK8ci.MasterDriver),
MasterImageId: types.Int64Value(int64(recordK8ci.MasterImageID)),
MaxMasterCount: types.Int64Value(int64(recordK8ci.MaxMasterCount)),
MaxWorkerCount: types.Int64Value(int64(recordK8ci.MaxWorkerCount)),
Milestones: types.Int64Value(int64(recordK8ci.Milestones)),
Name: types.StringValue(recordK8ci.Name),
NetworkPlugins: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordK8ci.NetworkPlugins),
SharedWith: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordK8ci.SharedWith),
Status: types.StringValue(recordK8ci.Status),
Version: types.StringValue(recordK8ci.Version),
WorkerDriver: types.StringValue(recordK8ci.WorkerDriver),
WorkerImageId: types.Int64Value(int64(recordK8ci.WorkerImageID)),
}
tflog.Info(ctx, "End flattens.K8CIDataSource")
return nil
}

View File

@@ -0,0 +1,75 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/utilities"
)
// K8CIListDataSource flattens data source for k8ci list.
// Returns an error in case the data source is not found on the platform.
// Flatten errors are added to tflog.
func K8CIListDataSource(ctx context.Context, state *models.DataSourceK8CIListModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.K8CIListDataSource")
diags := diag.Diagnostics{}
k8ciList, diags := utilities.K8CIListDataSourceCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.K8CIListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceK8CIListModel{
ByID: state.ByID,
Name: state.Name,
Status: state.Status,
WorkerDriver: state.WorkerDriver,
MasterDriver: state.MasterDriver,
NetworkPlugin: state.NetworkPlugin,
IncludeDisabled: state.IncludeDisabled,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(k8ciList.EntryCount)),
}
items := make([]models.ItemK8ciModel, 0, len(k8ciList.Data))
for _, item := range k8ciList.Data {
v := models.ItemK8ciModel{
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
Description: types.StringValue(item.Description),
GID: types.Int64Value(int64(item.GID)),
GUID: types.Int64Value(int64(item.GUID)),
K8ciID: types.Int64Value(int64(item.ID)),
LBImageID: types.Int64Value(int64(item.LBImageID)),
MasterDriver: types.StringValue(item.MasterDriver),
MasterImageId: types.Int64Value(int64(item.MasterImageID)),
MaxMasterCount: types.Int64Value(int64(item.MaxMasterCount)),
MaxWorkerCount: types.Int64Value(int64(item.MaxWorkerCount)),
Name: types.StringValue(item.Name),
SharedWith: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.SharedWith),
Status: types.StringValue(item.Status),
Version: types.StringValue(item.Version),
WorkerDriver: types.StringValue(item.WorkerDriver),
WorkerImageId: types.Int64Value(int64(item.WorkerImageID)),
}
items = append(items, v)
}
state.Items = items
tflog.Info(ctx, "End flattens.K8CIListDataSource")
return nil
}

View File

@@ -0,0 +1,73 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/utilities"
)
// K8CIListDeletedDataSource flattens data source for k8ci deleted list.
// Returns an error in case the data source is not found on the platform.
// Flatten errors are added to tflog.
func K8CIListDeletedDataSource(ctx context.Context, state *models.DataSourceK8CIListDeletedModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.K8CIListDataSource")
diags := diag.Diagnostics{}
k8ciList, diags := utilities.K8CIListDeletedDataSourceCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.K8CIListDataSource: before flatten")
id := uuid.New()
*state = models.DataSourceK8CIListDeletedModel{
ByID: state.ByID,
Name: state.Name,
WorkerDriver: state.WorkerDriver,
MasterDriver: state.MasterDriver,
NetworkPlugin: state.NetworkPlugin,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
EntryCount: types.Int64Value(int64(k8ciList.EntryCount)),
}
items := make([]models.ItemK8ciDeletedModel, 0, len(k8ciList.Data))
for _, item := range k8ciList.Data {
v := models.ItemK8ciDeletedModel{
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
Description: types.StringValue(item.Description),
GID: types.Int64Value(int64(item.GID)),
GUID: types.Int64Value(int64(item.GUID)),
K8ciID: types.Int64Value(int64(item.ID)),
LBImageID: types.Int64Value(int64(item.LBImageID)),
MasterDriver: types.StringValue(item.MasterDriver),
MasterImageId: types.Int64Value(int64(item.MasterImageID)),
MaxMasterCount: types.Int64Value(int64(item.MaxMasterCount)),
MaxWorkerCount: types.Int64Value(int64(item.MaxWorkerCount)),
Name: types.StringValue(item.Name),
SharedWith: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.SharedWith),
Status: types.StringValue(item.Status),
Version: types.StringValue(item.Version),
WorkerDriver: types.StringValue(item.WorkerDriver),
WorkerImageId: types.Int64Value(int64(item.WorkerImageID)),
}
items = append(items, v)
}
state.Items = items
tflog.Info(ctx, "End flattens.K8CIListDataSource")
return nil
}

View File

@@ -0,0 +1,59 @@
package flattens
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/utilities"
)
// K8CIResource flattens the resource for K8CI.
// Returns an error in case the resource is not found on the platform.
// Flatten errors are added to tflog.
func K8CIResource(ctx context.Context, state *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.K8CIResource")
diags := diag.Diagnostics{}
recordK8ci, diags := utilities.K8CIResourceCheckPresence(ctx, state, c)
if diags.HasError() {
return diags
}
tflog.Info(ctx, "flattens.K8CIResource: before flatten")
*state = models.ResourceK8CIModel{
Name: state.Name,
Version: state.Version,
MasterDriver: state.MasterDriver,
MasterImageId: state.MasterImageId,
MaxMasterCount: state.MaxMasterCount,
MaxWorkerCount: state.MaxWorkerCount,
NetworkPlugins: state.NetworkPlugins,
WorkerDriver: state.WorkerDriver,
WorkerImageId: state.WorkerImageId,
Timeouts: state.Timeouts,
Description: state.Description,
Enabled: state.Enabled,
Permanently: state.Permanently,
Restore: state.Restore,
SharedWith: state.SharedWith,
GID: types.Int64Value(int64(recordK8ci.GID)),
GUID: types.Int64Value(int64(recordK8ci.GUID)),
Id: types.StringValue(strconv.Itoa(int(recordK8ci.ID))),
K8ciID: types.Int64Value(int64(recordK8ci.ID)),
LBImageID: types.Int64Value(int64(recordK8ci.LBImageID)),
Milestones: types.Int64Value(int64(recordK8ci.Milestones)),
Status: types.StringValue(recordK8ci.Status),
}
tflog.Info(ctx, "End flattens.K8CIResource")
return nil
}

View File

@@ -0,0 +1,31 @@
package k8ci
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
)
func resourceK8CIInputChecks(ctx context.Context, plan *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
masterImageId := uint64(plan.MasterImageId.ValueInt64())
workerImageId := uint64(plan.WorkerImageId.ValueInt64())
images := make([]uint64, 0)
images = append(images, masterImageId)
if workerImageId != masterImageId {
images = append(images, workerImageId)
}
tflog.Info(ctx, "resourceK8CIInputChecks: exist images check", map[string]any{"master_image_id": masterImageId, "worker_image_id": workerImageId})
err := ic.ExistImages(ctx, images, c)
if err != nil {
diags.AddError("Cannot get info about images", err.Error())
}
return diags
}

View File

@@ -0,0 +1,31 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceK8CIModel struct {
// request fields
K8ciID types.Int64 `tfsdk:"k8ci_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Description types.String `tfsdk:"desc"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
Id types.String `tfsdk:"id"`
LBImageID types.Int64 `tfsdk:"lb_image_id"`
MasterDriver types.String `tfsdk:"master_driver"`
MasterImageId types.Int64 `tfsdk:"master_image_id"`
MaxMasterCount types.Int64 `tfsdk:"max_master_count"`
MaxWorkerCount types.Int64 `tfsdk:"max_worker_count"`
Milestones types.Int64 `tfsdk:"milestones"`
Name types.String `tfsdk:"name"`
NetworkPlugins types.List `tfsdk:"network_plugins"`
SharedWith types.List `tfsdk:"shared_with"`
Status types.String `tfsdk:"status"`
Version types.String `tfsdk:"version"`
WorkerDriver types.String `tfsdk:"worker_driver"`
WorkerImageId types.Int64 `tfsdk:"worker_image_id"`
}

View File

@@ -0,0 +1,45 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceK8CIListModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
Status types.String `tfsdk:"status"`
WorkerDriver types.String `tfsdk:"worker_driver"`
MasterDriver types.String `tfsdk:"master_driver"`
NetworkPlugin types.String `tfsdk:"network_plugin"`
IncludeDisabled types.Bool `tfsdk:"include_disabled"`
Page types.Int64 `tfsdk:"page"`
SortBy types.String `tfsdk:"sort_by"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemK8ciModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemK8ciModel struct {
CreatedTime types.Int64 `tfsdk:"created_time"`
Description types.String `tfsdk:"desc"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
K8ciID types.Int64 `tfsdk:"k8ci_id"`
LBImageID types.Int64 `tfsdk:"lb_image_id"`
MasterDriver types.String `tfsdk:"master_driver"`
MasterImageId types.Int64 `tfsdk:"master_image_id"`
MaxMasterCount types.Int64 `tfsdk:"max_master_count"`
MaxWorkerCount types.Int64 `tfsdk:"max_worker_count"`
Name types.String `tfsdk:"name"`
SharedWith types.List `tfsdk:"shared_with"`
Status types.String `tfsdk:"status"`
Version types.String `tfsdk:"version"`
WorkerDriver types.String `tfsdk:"worker_driver"`
WorkerImageId types.Int64 `tfsdk:"worker_image_id"`
}

View File

@@ -0,0 +1,43 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type DataSourceK8CIListDeletedModel struct {
// request fields
ByID types.Int64 `tfsdk:"by_id"`
Name types.String `tfsdk:"name"`
WorkerDriver types.String `tfsdk:"worker_driver"`
MasterDriver types.String `tfsdk:"master_driver"`
NetworkPlugin types.String `tfsdk:"network_plugin"`
Page types.Int64 `tfsdk:"page"`
SortBy types.String `tfsdk:"sort_by"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
Items []ItemK8ciDeletedModel `tfsdk:"items"`
EntryCount types.Int64 `tfsdk:"entry_count"`
}
type ItemK8ciDeletedModel struct {
CreatedTime types.Int64 `tfsdk:"created_time"`
Description types.String `tfsdk:"desc"`
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
K8ciID types.Int64 `tfsdk:"k8ci_id"`
LBImageID types.Int64 `tfsdk:"lb_image_id"`
MasterDriver types.String `tfsdk:"master_driver"`
MasterImageId types.Int64 `tfsdk:"master_image_id"`
MaxMasterCount types.Int64 `tfsdk:"max_master_count"`
MaxWorkerCount types.Int64 `tfsdk:"max_worker_count"`
Name types.String `tfsdk:"name"`
SharedWith types.List `tfsdk:"shared_with"`
Status types.String `tfsdk:"status"`
Version types.String `tfsdk:"version"`
WorkerDriver types.String `tfsdk:"worker_driver"`
WorkerImageId types.Int64 `tfsdk:"worker_image_id"`
}

View File

@@ -0,0 +1,37 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceK8CIModel struct {
// required fields
Name types.String `tfsdk:"name"`
Version types.String `tfsdk:"version"`
MasterDriver types.String `tfsdk:"master_driver"`
MasterImageId types.Int64 `tfsdk:"master_image_id"`
MaxMasterCount types.Int64 `tfsdk:"max_master_count"`
MaxWorkerCount types.Int64 `tfsdk:"max_worker_count"`
NetworkPlugins types.List `tfsdk:"network_plugins"`
WorkerDriver types.String `tfsdk:"worker_driver"`
WorkerImageId types.Int64 `tfsdk:"worker_image_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// optional fields
Description types.String `tfsdk:"desc"`
Enabled types.Bool `tfsdk:"enabled"`
Permanently types.Bool `tfsdk:"permanently"`
Restore types.Bool `tfsdk:"restore"`
SharedWith types.List `tfsdk:"shared_with"`
// response fields
GID types.Int64 `tfsdk:"gid"`
GUID types.Int64 `tfsdk:"guid"`
K8ciID types.Int64 `tfsdk:"k8ci_id"`
Id types.String `tfsdk:"id"`
LBImageID types.Int64 `tfsdk:"lb_image_id"`
Milestones types.Int64 `tfsdk:"milestones"`
Status types.String `tfsdk:"status"`
}

View File

@@ -0,0 +1,330 @@
package k8ci
import (
"context"
"reflect"
"strconv"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceK8CI{}
_ resource.ResourceWithImportState = &resourceK8CI{}
)
// NewResourceK8Ci is a helper function to simplify the provider implementation.
func NewResourceK8Ci() resource.Resource {
return &resourceK8CI{}
}
// resourceK8CI is the resource implementation.
type resourceK8CI struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceK8CI) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
// Get plan to create resource group
var plan models.ResourceK8CIModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceK8CI: Error receiving the plan")
return
}
tflog.Info(ctx, "Create resourceK8CI: got plan successfully", map[string]any{"name": plan.Name.ValueString()})
tflog.Info(ctx, "Create resourceK8CI: start creating", map[string]any{"name": plan.Name.ValueString()})
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceK8CI: Error set timeout")
return
}
tflog.Info(ctx, "Create resourceK8CI: set timeouts successfully", map[string]any{
"name": plan.Name.ValueString(),
"createTimeout": createTimeout})
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
// Check if input values are valid in the platform
tflog.Info(ctx, "Create resourceK8CI: starting input checks", map[string]any{"name": plan.Name.ValueString()})
resp.Diagnostics.Append(resourceK8CIInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceK8CI: Error input checks")
return
}
tflog.Info(ctx, "Create resourceK8CI: input checks successful", map[string]any{"name": plan.Name.ValueString()})
// Make create request and get response
createReq, diags := utilities.CreateRequestResourceK8CI(ctx, &plan)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceK8CI: Error forming create request for resource k8ci")
return
}
tflog.Info(ctx, "Create resourceK8CI: before call CloudBroker().K8CI().Create", map[string]any{"req": createReq})
k8ciid, err := r.client.CloudBroker().K8CI().Create(ctx, createReq)
if err != nil {
resp.Diagnostics.AddError(
"Create resourceK8CI: unable to Create k8ci",
err.Error(),
)
return
}
plan.Id = types.StringValue(strconv.Itoa(int(k8ciid)))
tflog.Info(ctx, "Create resourceK8CI: k8ci created", map[string]any{"k8ci_id": k8ciid, "name": plan.Name.ValueString()})
// additional settings after k8ci creation: in case of failures, warnings are added to resp.Diagnostics,
// because additional settings failure is not critical. If errors were added instead of warnings, terraform
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
// enable/disable of k8ci after creation, warnings added to resp.Diagnostics in case of failure.
temp := utilities.K8CIResourceEnableDisable(ctx, &plan, r.client)
for _, d := range temp {
if d.Severity() == diag.SeverityError {
resp.Diagnostics.AddWarning(d.Summary(), d.Detail())
}
}
tflog.Info(ctx, "Create resourceK8CI: resource creation is completed", map[string]any{"k8ci_id": k8ciid})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.K8CIResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceK8CI) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceK8CIModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceK8CI: Error get state")
return
}
tflog.Info(ctx, "Read resourceK8CI: got state successfully", map[string]any{"k8ci_id": state.Id.ValueString()})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceK8CI: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceK8CI: set timeouts successfully", map[string]any{
"k8ci_id": state.Id.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// read status
resp.Diagnostics.Append(utilities.K8CIReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceK8CI: Error reading resource group status")
return
}
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.K8CIResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceK8CI: Error flatten resource group")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceK8CI: Error set state")
return
}
tflog.Info(ctx, "End read resource group")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceK8CI) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceK8CIModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error receiving the plan")
return
}
tflog.Info(ctx, "Update resourceK8CI: got plan successfully", map[string]any{"k8ci_id": plan.Id.ValueString()})
// Retrieve values from state
var state models.ResourceK8CIModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceK8CI: got state successfully", map[string]any{"k8ci_id": state.Id.ValueString()})
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Error set timeout")
return
}
tflog.Info(ctx, "Update resourceK8CI: set timeouts successfully", map[string]any{
"k8ci_id": state.Id.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
// Checking for values in the platform
tflog.Info(ctx, "Update resourceK8CI: starting input checks", map[string]any{"k8ci_id": plan.Id.ValueString()})
resp.Diagnostics.Append(resourceK8CIInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error input checks")
return
}
tflog.Info(ctx, "Update resourceK8CI: input checks successful", map[string]any{"k8ci_id": state.Id.ValueString()})
// Checking that immutable variables have not changed
tflog.Info(ctx, "Update resourceK8CI: starting immutable variables checks", map[string]any{"k8ci_id": plan.Id.ValueString()})
resp.Diagnostics.Append(utilities.K8CIIUpdateVarChecks(&plan, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error input checks")
return
}
tflog.Info(ctx, "Update resourceK8CI: input checks successful", map[string]any{"k8ci_id": state.Id.ValueString()})
plan.Id = state.Id
// grant/revoke access for K8CI
if !reflect.DeepEqual(plan.SharedWith, state.SharedWith) {
resp.Diagnostics.Append(utilities.K8CISharedWithUpdate(ctx, &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error grant/revoke access for k8ci")
return
}
}
// enable or disable k8ci
if !plan.Enabled.Equal(state.Enabled) {
resp.Diagnostics.Append(utilities.K8CIResourceEnableDisable(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceK8CI: Error enable/disable k8ci")
return
}
}
tflog.Info(ctx, "Update resourceK8CI: resource update is completed", map[string]any{"k8ci_id": plan.Id.ValueString()})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.K8CIResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set state to fully populated data
diags = resp.State.Set(ctx, plan)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceK8CI) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceK8CIModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceK8CI: Error get state")
return
}
tflog.Info(ctx, "Delete resourceK8CI: got state successfully", map[string]any{"k8ci_id": state.Id.ValueString()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceK8CI: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceK8CI: set timeouts successfully", map[string]any{
"k8ci_id": state.Id.ValueString(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
k8ciid, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parsed ID K8CI from state", err.Error())
return
}
// Delete existing k8ci
delReq := k8ci.DeleteRequest{
K8CIID: k8ciid,
Permanently: state.Permanently.ValueBool(),
}
_, err = r.client.CloudBroker().K8CI().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceK8CI: Error deleting k8ci with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete K8CI", map[string]any{"k8ci_id": state.Id.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceK8CI) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceK8CI(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceK8CI) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_k8ci"
}
// Configure adds the provider configured client to the resource.
func (r *resourceK8CI) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceK8CI")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceK8CI successfully")
}
func (r *resourceK8CI) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -0,0 +1,73 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceK8CI() map[string]schema.Attribute {
return map[string]schema.Attribute{
"k8ci_id": schema.Int64Attribute{
Required: true,
Description: "K8CI ID",
},
"desc": schema.StringAttribute{
Computed: true,
Description: "description",
},
"gid": schema.Int64Attribute{
Computed: true,
Description: "gid",
},
"guid": schema.Int64Attribute{
Computed: true,
Description: "guid",
},
"id": schema.StringAttribute{
Computed: true,
},
"lb_image_id": schema.Int64Attribute{
Computed: true,
Description: "LB Image ID",
},
"master_driver": schema.StringAttribute{
Computed: true,
},
"master_image_id": schema.Int64Attribute{
Computed: true,
},
"max_master_count": schema.Int64Attribute{
Computed: true,
},
"max_worker_count": schema.Int64Attribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
Description: "K8CI name",
},
"network_plugins": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
"shared_with": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"status": schema.StringAttribute{
Computed: true,
},
"version": schema.StringAttribute{
Computed: true,
},
"worker_driver": schema.StringAttribute{
Computed: true,
},
"worker_image_id": schema.Int64Attribute{
Computed: true,
},
}
}

View File

@@ -0,0 +1,113 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceK8CIList() map[string]schema.Attribute {
return map[string]schema.Attribute{
"by_id": schema.Int64Attribute{
Optional: true,
Description: "Filter by ID",
},
"name": schema.StringAttribute{
Optional: true,
Description: "Filter by name",
},
"status": schema.StringAttribute{
Optional: true,
Description: "Filter by status",
},
"worker_driver": schema.StringAttribute{
Optional: true,
Description: "Filter by worker driver",
},
"master_driver": schema.StringAttribute{
Optional: true,
Description: "Filter by master driver",
},
"network_plugin": schema.StringAttribute{
Optional: true,
Description: "Filter by network plugin",
},
"include_disabled": schema.BoolAttribute{
Optional: true,
Description: "Include disabled k8cis in result",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "Page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "Page size",
},
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"created_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"k8ci_id": schema.Int64Attribute{
Computed: true,
},
"lb_image_id": schema.Int64Attribute{
Computed: true,
},
"master_driver": schema.StringAttribute{
Computed: true,
},
"master_image_id": schema.Int64Attribute{
Computed: true,
},
"max_master_count": schema.Int64Attribute{
Computed: true,
},
"max_worker_count": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"shared_with": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"status": schema.StringAttribute{
Computed: true,
},
"version": schema.StringAttribute{
Computed: true,
},
"worker_driver": schema.StringAttribute{
Computed: true,
},
"worker_image_id": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

View File

@@ -0,0 +1,105 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaDataSourceK8CIListDeleted() map[string]schema.Attribute {
return map[string]schema.Attribute{
"by_id": schema.Int64Attribute{
Optional: true,
Description: "Filter by ID",
},
"name": schema.StringAttribute{
Optional: true,
Description: "Filter by name",
},
"worker_driver": schema.StringAttribute{
Optional: true,
Description: "Filter by worker driver",
},
"master_driver": schema.StringAttribute{
Optional: true,
Description: "Filter by master driver",
},
"network_plugin": schema.StringAttribute{
Optional: true,
Description: "Filter by network plugin",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "Page number",
},
"size": schema.Int64Attribute{
Optional: true,
Description: "Page size",
},
"id": schema.StringAttribute{
Computed: true,
},
"items": schema.ListNestedAttribute{
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"created_time": schema.Int64Attribute{
Computed: true,
},
"desc": schema.StringAttribute{
Computed: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"k8ci_id": schema.Int64Attribute{
Computed: true,
},
"lb_image_id": schema.Int64Attribute{
Computed: true,
},
"master_driver": schema.StringAttribute{
Computed: true,
},
"master_image_id": schema.Int64Attribute{
Computed: true,
},
"max_master_count": schema.Int64Attribute{
Computed: true,
},
"max_worker_count": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
"shared_with": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"status": schema.StringAttribute{
Computed: true,
},
"version": schema.StringAttribute{
Computed: true,
},
"worker_driver": schema.StringAttribute{
Computed: true,
},
"worker_image_id": schema.Int64Attribute{
Computed: true,
},
},
},
},
"entry_count": schema.Int64Attribute{
Computed: true,
},
}
}

View File

@@ -0,0 +1,84 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/types"
)
func MakeSchemaResourceK8CI() map[string]schema.Attribute {
return map[string]schema.Attribute{
"name": schema.StringAttribute{
Required: true,
Description: "K8CI name",
},
"version": schema.StringAttribute{
Required: true,
},
"master_driver": schema.StringAttribute{
Required: true,
},
"master_image_id": schema.Int64Attribute{
Required: true,
},
"max_master_count": schema.Int64Attribute{
Required: true,
},
"max_worker_count": schema.Int64Attribute{
Required: true,
},
"worker_image_id": schema.Int64Attribute{
Required: true,
},
"worker_driver": schema.StringAttribute{
Required: true,
},
"network_plugins": schema.ListAttribute{
Required: true,
ElementType: types.StringType,
},
"enabled": schema.BoolAttribute{
Optional: true,
},
"permanently": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
// default value is true
},
"restore": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
// default value is true
},
"desc": schema.StringAttribute{
Optional: true,
},
"gid": schema.Int64Attribute{
Computed: true,
},
"guid": schema.Int64Attribute{
Computed: true,
},
"k8ci_id": schema.Int64Attribute{
Computed: true,
},
"id": schema.StringAttribute{
Computed: true,
},
"lb_image_id": schema.Int64Attribute{
Computed: true,
},
"milestones": schema.Int64Attribute{
Computed: true,
},
"shared_with": schema.ListAttribute{
Optional: true,
ElementType: types.Int64Type,
},
"status": schema.StringAttribute{
Computed: true,
},
}
}

View File

@@ -0,0 +1,27 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
)
func K8ciDataSourceCheckPresence(ctx context.Context, k8ciID uint64, c *decort.DecortClient) (*k8ci.RecordK8CI, diag.Diagnostics) {
tflog.Info(ctx, fmt.Sprintf("K8ciDataSourceCheckPresence: Get info about k8ci with ID - %v", k8ciID))
diags := diag.Diagnostics{}
recordK8ci, err := c.CloudBroker().K8CI().Get(ctx, k8ci.GetRequest{K8CIID: k8ciID})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about k8ci with ID %v", k8ciID), err.Error())
return nil, diags
}
tflog.Info(ctx, "K8ciDataSourceCheckPresence: response from CloudBroker().K8CI().Get", map[string]any{"k8ci_id": k8ciID, "response": recordK8ci})
return recordK8ci, nil
}
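A minimal sketch of how a data source Read would typically consume this helper; d.client, state and resp are assumed from the surrounding data source code, and the state field name for the ID is illustrative:

	record, diags := utilities.K8ciDataSourceCheckPresence(ctx, uint64(state.K8CIID.ValueInt64()), d.client)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	// record is then flattened into state and written back via resp.State.Set.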

View File

@@ -0,0 +1,59 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
)
func K8CIListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceK8CIListModel, c *decort.DecortClient) (*k8ci.ListK8CI, diag.Diagnostics) {
tflog.Info(ctx, "K8CIListDataSourceCheckPresence: Get info about k8ci list")
diags := diag.Diagnostics{}
listReq := k8ci.ListRequest{}
if !plan.ByID.IsNull() {
listReq.ByID = uint64(plan.ByID.ValueInt64())
}
if !plan.Name.IsNull() {
listReq.Name = plan.Name.ValueString()
}
if !plan.Status.IsNull() {
listReq.Status = plan.Status.ValueString()
}
if !plan.WorkerDriver.IsNull() {
listReq.WorkerDriver = plan.WorkerDriver.ValueString()
}
if !plan.MasterDriver.IsNull() {
listReq.MasterDriver = plan.MasterDriver.ValueString()
}
if !plan.NetworkPlugin.IsNull() {
listReq.NetworkPlugins = plan.NetworkPlugin.ValueString()
}
if !plan.IncludeDisabled.IsNull() {
listReq.IncludeDisabled = plan.IncludeDisabled.ValueBool()
}
if !plan.SortBy.IsNull() {
listReq.SortBy = plan.SortBy.ValueString()
}
if !plan.Page.IsNull() {
listReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
listReq.Size = uint64(plan.Size.ValueInt64())
}
tflog.Info(ctx, "K8CIListDataSourceCheckPresence: before call CloudBroker().K8CI().List", map[string]any{"req": listReq})
list, err := c.CloudBroker().K8CI().List(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about k8ci list", err.Error())
return nil, diags
}
tflog.Info(ctx, "K8CIListDataSourceCheckPresence: successfull response from CloudBroker().K8CI().List")
return list, nil
}

View File

@@ -0,0 +1,53 @@
package utilities
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
)
func K8CIListDeletedDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceK8CIListDeletedModel, c *decort.DecortClient) (*k8ci.ListK8CI, diag.Diagnostics) {
tflog.Info(ctx, "K8CIListDeletedDataSourceCheckPresence: Get info about k8ci list")
diags := diag.Diagnostics{}
listReq := k8ci.ListDeletedRequest{}
if !plan.ByID.IsNull() {
listReq.ByID = uint64(plan.ByID.ValueInt64())
}
if !plan.Name.IsNull() {
listReq.Name = plan.Name.ValueString()
}
if !plan.WorkerDriver.IsNull() {
listReq.WorkerDriver = plan.WorkerDriver.ValueString()
}
if !plan.MasterDriver.IsNull() {
listReq.MasterDriver = plan.MasterDriver.ValueString()
}
if !plan.NetworkPlugin.IsNull() {
listReq.NetworkPlugins = plan.NetworkPlugin.ValueString()
}
if !plan.SortBy.IsNull() {
listReq.SortBy = plan.SortBy.ValueString()
}
if !plan.Page.IsNull() {
listReq.Page = uint64(plan.Page.ValueInt64())
}
if !plan.Size.IsNull() {
listReq.Size = uint64(plan.Size.ValueInt64())
}
tflog.Info(ctx, "K8CIListDeletedDataSourceCheckPresence: before call CloudBroker().K8CI().ListDeleted", map[string]any{"req": listReq})
list, err := c.CloudBroker().K8CI().ListDeleted(ctx, listReq)
if err != nil {
diags.AddError("Cannot get info about k8ci deleted list", err.Error())
return nil, diags
}
tflog.Info(ctx, "K8CIListDeletedDataSourceCheckPresence: successfull response from CloudBroker().K8CI().ListDeleted")
return list, nil
}

View File

@@ -0,0 +1,331 @@
package utilities
import (
"context"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)
func CreateRequestResourceK8CI(ctx context.Context, plan *models.ResourceK8CIModel) (k8ci.CreateRequest, diag.Diagnostics) {
tflog.Info(ctx, "Start CreateRequestResourceK8CI", map[string]any{
"name": plan.Name.ValueString(),
})
// set up required parameters in the K8CI create request
createReq := k8ci.CreateRequest{
Name: plan.Name.ValueString(),
Version: plan.Version.ValueString(),
MasterDriver: plan.MasterDriver.ValueString(),
WorkerDriver: plan.WorkerDriver.ValueString(),
MaxMasterCount: uint64(plan.MaxMasterCount.ValueInt64()),
MaxWorkerCount: uint64(plan.MaxWorkerCount.ValueInt64()),
MasterImageID: uint64(plan.MasterImageId.ValueInt64()),
WorkerImageID: uint64(plan.WorkerImageId.ValueInt64()),
}
networkPlugins := make([]string, 0, len(plan.NetworkPlugins.Elements()))
for _, plugin := range plan.NetworkPlugins.Elements() {
networkPlugins = append(networkPlugins, plugin.(types.String).ValueString())
}
createReq.NetworkPlugins = networkPlugins
if !plan.Description.IsNull() {
createReq.Description = plan.Description.ValueString()
}
if !plan.SharedWith.IsNull() {
sharedWith := make([]uint64, 0, len(plan.SharedWith.Elements()))
for _, shared := range plan.SharedWith.Elements() {
sharedWith = append(sharedWith, uint64(shared.(types.Int64).ValueInt64()))
}
createReq.SharedWith = sharedWith
}
return createReq, nil
}
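A hedged sketch of how the resource Create step would consume this request. The CloudBroker().K8CI().Create call is an assumption modeled on the CloudBroker().RG().Create call that appears later in this commit; the names r, plan and resp come from the surrounding resource code:

	createReq, diags := utilities.CreateRequestResourceK8CI(ctx, &plan)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	k8ciID, err := r.client.CloudBroker().K8CI().Create(ctx, createReq)
	if err != nil {
		resp.Diagnostics.AddError("Create resourceK8CI: unable to create K8CI", err.Error())
		return
	}
	plan.Id = types.StringValue(strconv.FormatUint(k8ciID, 10))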
func K8CIResourceEnableDisable(ctx context.Context, plan *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "EnableDisable k8ci with ID", map[string]any{"k8ci_id": plan.Id.ValueString()})
diags := diag.Diagnostics{}
k8ciid, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parsed ID k8ci from state", err.Error())
return diags
}
if plan.Enabled.IsNull() || plan.Enabled.ValueBool() {
tflog.Info(ctx, "Enable k8ci with ID", map[string]any{"k8ci_id": plan.Id.ValueString()})
_, err := c.CloudBroker().K8CI().Enable(ctx, k8ci.EnableRequest{K8CIID: k8ciid})
if err != nil {
diags.AddError("K8CIResourceEnableDisable: error to enable k8ci", err.Error())
return diags
}
} else {
tflog.Info(ctx, "Disable k8ci with ID", map[string]any{"k8ci_id": plan.Id.ValueString()})
_, err := c.CloudBroker().K8CI().Disable(ctx, k8ci.DisableRequest{K8CIID: k8ciid})
if err != nil {
diags.AddError("K8CIResourceEnableDisable: error to disable k8ci", err.Error())
return diags
}
}
return diags
}
func K8CIResourceCheckPresence(ctx context.Context, plan *models.ResourceK8CIModel, c *decort.DecortClient) (*k8ci.RecordK8CI, diag.Diagnostics) {
tflog.Info(ctx, fmt.Sprintf("K8CIResourceCheckPresence: Get info about k8ci with ID - %v", plan.Id.ValueString()))
diags := diag.Diagnostics{}
k8ciid, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parsed ID k8ci from state", err.Error())
return nil, diags
}
recordK8ci, err := c.CloudBroker().K8CI().Get(ctx, k8ci.GetRequest{K8CIID: k8ciid})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about k8ci with ID %v", k8ciid), err.Error())
return nil, diags
}
tflog.Info(ctx, "K8CIResourceCheckPresence: response from CloudBroker().K8CI().Get", map[string]any{"k8ci_id": k8ciid, "response": recordK8ci})
return recordK8ci, nil
}
func K8CIReadStatus(ctx context.Context, plan *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Read status k8ci with ID", map[string]any{"k8ci_id": plan.Id.ValueString()})
diags := diag.Diagnostics{}
k8ciid, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parsed ID k8ci from state", err.Error())
return diags
}
k8ciItem, err := c.CloudBroker().K8CI().Get(ctx, k8ci.GetRequest{K8CIID: k8ciid})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about k8ci with ID %v", k8ciItem), err.Error())
return diags
}
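// The statuses below gate the update: Modeled and Destroying require operator
// attention or waiting, Deleted can optionally be restored (and then
// re-enabled) when restore is true or unset, and Destroyed is unrecoverable.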
switch k8ciItem.Status {
case status.Modeled:
diags.AddError("Error:", fmt.Sprintf("The k8ci is in status: %s, please, contact support for more information", k8ciItem.Status))
return diags
case status.Deleted:
if plan.Restore.ValueBool() || plan.Restore.IsNull() {
diags = K8CIRestore(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "Error restore K8CI", map[string]any{"k8ci_id": plan.Id.ValueString()})
return diags
}
} else {
diags.AddError("k8ci in status Deleted:", "please clean state, or restore k8ci")
return diags
}
diags = K8CIResourceEnableDisable(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "Error enable/disable k8ci", map[string]any{"k8ci_id": plan.Id.ValueString()})
return diags
}
case status.Destroying:
diags.AddError("Error:", fmt.Sprintf("The k8ci is in progress with status: %s", k8ciItem.Status))
return diags
case status.Destroyed:
diags.AddError("Error:", "The resource cannot be updated because it has been destroyed")
return diags
}
tflog.Info(ctx, "Read status k8ci successfully", map[string]any{"k8ci_id": plan.Id.ValueString()})
return diags
}
func K8CIRestore(ctx context.Context, plan *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Restore k8ci with ID", map[string]any{"k8ci_id": plan.Id.ValueString()})
diags := diag.Diagnostics{}
k8ciid, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parsed ID k8ci from state", err.Error())
return diags
}
_, err = c.CloudBroker().K8CI().Restore(ctx, k8ci.RestoreRequest{K8CIID: k8ciid})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot restore k8ci with ID - %s", plan.Id.ValueString()), err.Error())
return diags
}
tflog.Info(ctx, "Restore k8ci successfully", map[string]any{"k8ci_id": plan.Id.ValueString()})
return diags
}
func K8CIUpdateVarChecks(plan *models.ResourceK8CIModel, state *models.ResourceK8CIModel) diag.Diagnostics {
diags := diag.Diagnostics{}
if !plan.Name.Equal(state.Name) {
diags.AddError(
"Update resourceK8CI: Invalid input Name",
fmt.Sprintf("block Name must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.Version.Equal(state.Version) {
diags.AddError(
"Update resourceK8CI: Invalid input Version",
fmt.Sprintf("block Version must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.MasterDriver.Equal(state.MasterDriver) {
diags.AddError(
"Update resourceK8CI: Invalid MasterDriver",
fmt.Sprintf("block MasterDriver must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.MasterImageId.Equal(state.MasterImageId) {
diags.AddError(
"Update resourceK8CI: Invalid MasterImageId",
fmt.Sprintf("block MasterImageId must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.MaxMasterCount.Equal(state.MaxMasterCount) {
diags.AddError(
"Update resourceK8CI: Invalid MaxMasterCount",
fmt.Sprintf("block MaxMasterCount must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.MaxWorkerCount.Equal(state.MaxWorkerCount) {
diags.AddError(
"Update resourceK8CI: Invalid MaxWorkerCount",
fmt.Sprintf("block MaxWorkerCount must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.NetworkPlugins.Equal(state.NetworkPlugins) {
diags.AddError(
"Update resourceK8CI: Invalid NetworkPlugins",
fmt.Sprintf("block NetworkPlugins must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.WorkerDriver.Equal(state.WorkerDriver) {
diags.AddError(
"Update resourceK8CI: Invalid WorkerDriver",
fmt.Sprintf("block WorkerDriver must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
if !plan.WorkerImageId.Equal(state.WorkerImageId) {
diags.AddError(
"Update resourceK8CI: Invalid WorkerImageId",
fmt.Sprintf("block WorkerImageId must not be changed for resource with k8ci_id %s", plan.Id.ValueString()),
)
return diags
}
return nil
}
func K8CISharedWithUpdate(ctx context.Context, plan *models.ResourceK8CIModel, state *models.ResourceK8CIModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
k8ciid, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("K8CISharedWithUpdate: cannot parsed ID k8ci from state", err.Error())
return diags
}
addSet, removeSet := difference(state.SharedWith, plan.SharedWith)
for _, account := range addSet {
accountId := uint64(account.(types.Int64).ValueInt64())
tflog.Info(ctx, fmt.Sprintf("K8CISharedWithUpdate: Start add account with ID - %d to sharedWith access list to k8ci with ID - %d", accountId, k8ciid))
req := k8ci.AccessAddRequest{
K8CIID: k8ciid,
AccountId: accountId,
}
res, err := c.CloudBroker().K8CI().AccessAdd(ctx, req)
tflog.Info(ctx, "K8CISharedWithUpdate: response from CloudBroker().K8CI().AccessAdd", map[string]any{"k8ci_id": plan.Id.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("K8CISharedWithUpdate: Cannot add account with ID - %d", accountId), err.Error())
}
}
if diags.HasError() {
tflog.Error(ctx, "K8CISharedWithUpdate: Errors occurred while managing add accounts")
return diags
}
for _, account := range removeSet {
accountId := uint64(account.(types.Int64).ValueInt64())
tflog.Info(ctx, fmt.Sprintf("K8CISharedWithUpdate: Start remove account with ID - %d from sharedWith access list to k8ci with ID - %d", accountId, k8ciid))
req := k8ci.AccessRemoveRequest{
K8CIID: k8ciid,
AccountId: accountId,
}
res, err := c.CloudBroker().K8CI().AccessRemove(ctx, req)
tflog.Info(ctx, "K8CISharedWithUpdate: response from CloudBroker().K8CI().AccessRemove", map[string]any{"k8ci_id": plan.Id.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("K8CISharedWithUpdate: Cannot remove account with ID - %d", accountId), err.Error())
}
}
if diags.HasError() {
tflog.Error(ctx, "K8CISharedWithUpdate: Errors occurred while managing remove accounts")
return diags
}
tflog.Info(ctx, "K8CISharedWithUpdate: sharedWith access list is successfully update", map[string]any{"k8ci_id": k8ciid})
return nil
}
// difference returns the elements added (present in newSet but not in oldSet) and the elements removed (present in oldSet but not in newSet).
func difference(oldSet, newSet types.List) (added, removed []any) {
oldMap := make(map[interface{}]struct{})
newMap := make(map[interface{}]struct{})
for _, elem := range oldSet.Elements() {
oldMap[elem] = struct{}{}
}
for _, elem := range newSet.Elements() {
newMap[elem] = struct{}{}
}
for elem := range newMap {
if _, found := oldMap[elem]; !found {
added = append(added, elem)
}
}
for elem := range oldMap {
if _, found := newMap[elem]; !found {
removed = append(removed, elem)
}
}
return
}
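For illustration, a minimal hypothetical sketch of difference in action; the account IDs are made up, attr and types are the terraform-plugin-framework packages, and in the provider the result drives the AccessAdd/AccessRemove calls in K8CISharedWithUpdate above:

	oldSet := types.ListValueMust(types.Int64Type, []attr.Value{
		types.Int64Value(1), types.Int64Value(2),
	})
	newSet := types.ListValueMust(types.Int64Type, []attr.Value{
		types.Int64Value(2), types.Int64Value(3),
	})
	added, removed := difference(oldSet, newSet)
	// added holds account 3 (access will be granted),
	// removed holds account 1 (access will be revoked); account 2 is untouched.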

View File

@@ -196,13 +196,6 @@ func (r *resourceLB) Update(ctx context.Context, req resource.UpdateRequest, res
}
tflog.Info(ctx, "Update resourceLB: input checks successful", map[string]any{"ID": plan.ID.ValueString()})
// Read status lb and if it is necessary to restore it
resp.Diagnostics.Append(utilities.LBReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Error read status or restore")
return
}
// Update ha mode lb
if !plan.HAMode.Equal(state.HAMode) && plan.HAMode.ValueBool() {
resp.Diagnostics.Append(utilities.LBUpdateHaMode(ctx, &state, r.client)...)

View File

@@ -22,7 +22,7 @@ func CreateResourceLB(ctx context.Context, plan *models.ResourceLBModel, c *deco
createReq := lb.CreateRequest{
Name: plan.Name.ValueString(),
RGID: uint64(plan.RGID.ValueInt64()),
ExtNetID: uint64(plan.ExtNetID.ValueInt64()),
ExtNetID: plan.ExtNetID.ValueInt64(),
VINSID: uint64(plan.VINSID.ValueInt64()),
Start: plan.Start.ValueBool(),
}

View File

@@ -35,8 +35,8 @@ func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decor
id := uuid.New()
*state = models.DataSourceRGModel{
RGID: state.RGID,
Reason: state.Reason,
RGID: state.RGID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),

View File

@@ -30,8 +30,8 @@ func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel
id := uuid.New()
*state = models.DataSourceRGUsageModel{
RGID: state.RGID,
Reason: state.Reason,
RGID: state.RGID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),

View File

@@ -0,0 +1,88 @@
package flattens
import (
"context"
"fmt"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
)
// RGResource flattens the resource for rg (resource group).
// It returns an error diagnostic if the resource is not found on the platform.
// Flatten progress and errors are also logged via tflog.
func RGResource(ctx context.Context, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "Start flattens.RGResource")
diags := diag.Diagnostics{}
rgId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("Cannot parse resource group ID from state", err.Error())
return diags
}
recordRG, err := utilities.RGCheckPresence(ctx, rgId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgId), err.Error())
return diags
}
tflog.Info(ctx, "flattens.RGResource: before flatten", map[string]any{"rg_id": rgId, "recordRG": recordRG})
*plan = models.ResourceRGModel{
AccountID: types.Int64Value(int64(recordRG.AccountID)),
GID: types.Int64Value(int64(recordRG.GID)),
Name: types.StringValue(recordRG.Name),
DefNetType: plan.DefNetType,
IPCIDR: plan.IPCIDR,
ResourceLimits: flattenResourceLimits(ctx, &recordRG.ResourceLimits),
ComputeFeatures: plan.ComputeFeatures,
ExtNetID: plan.ExtNetID,
ExtIP: plan.ExtIP,
Owner: plan.Owner,
Access: plan.Access,
DefNet: plan.DefNet,
Description: plan.Description,
Force: plan.Force,
Permanently: plan.Permanently,
RegisterComputes: plan.RegisterComputes,
Restore: plan.Restore,
Enable: plan.Enable,
UniqPools: plan.UniqPools,
Timeouts: plan.Timeouts,
RGID: types.Int64Value(int64(recordRG.ID)),
LastUpdated: plan.LastUpdated,
AccountName: types.StringValue(recordRG.AccountName),
ACL: flattenACL(ctx, &recordRG.ACL),
CPUAllocationParameter: types.StringValue(recordRG.CPUAllocationParameter),
CPUAllocationRatio: types.Float64Value(recordRG.CPUAllocationRatio),
DefNetID: types.Int64Value(recordRG.DefNetID),
DeletedBy: types.StringValue(recordRG.DeletedBy),
DeletedTime: types.Int64Value(int64(recordRG.DeletedTime)),
GUID: types.Int64Value(int64(recordRG.GUID)),
Id: types.StringValue(strconv.Itoa(int(recordRG.ID))),
LockStatus: types.StringValue(recordRG.LockStatus),
Milestones: types.Int64Value(int64(recordRG.Milestones)),
Secret: types.StringValue(recordRG.Secret),
Status: types.StringValue(recordRG.Status),
UpdatedBy: types.StringValue(recordRG.UpdatedBy),
UpdatedTime: types.Int64Value(int64(recordRG.UpdatedTime)),
ResourceTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.ResTypes),
VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VINS),
VMS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VMs),
}
tflog.Info(ctx, "flattenResourceRG: after flatten", map[string]any{"rg_id": plan.Id.ValueString()})
tflog.Info(ctx, "End FlattenRGResource")
return nil
}

View File

@@ -0,0 +1,41 @@
package rg
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
)
func resourceRgInputChecks(ctx context.Context, plan *models.ResourceRGModel, c *decort.DecortClient) diag.Diagnostics {
diags := diag.Diagnostics{}
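// Note: these checks intentionally accumulate diagnostics instead of returning
// on the first failure, so every invalid input is reported in a single run.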
accountId := uint64(plan.AccountID.ValueInt64())
tflog.Info(ctx, "resourceRgInputChecks: exist account check", map[string]any{"account_id": accountId})
err := ic.ExistAccount(ctx, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error())
}
gid := uint64(plan.GID.ValueInt64())
tflog.Info(ctx, "resourceRgInputChecks: exist gid check", map[string]any{"gid": gid})
err = ic.ExistGID(ctx, gid, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about gid with ID %v", gid), err.Error())
}
if !plan.ExtNetID.IsNull() {
extnetId := uint64(plan.ExtNetID.ValueInt64())
tflog.Info(ctx, "resourceRgInputChecks: exist ext_net check", map[string]any{"ext_net_id": extnetId})
err = ic.ExistExtNetInRG(ctx, extnetId, accountId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot get info about ext net with ID %v", extnetId), err.Error())
}
}
return diags
}

View File

@@ -9,7 +9,6 @@ import (
type DataSourceRGModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields

View File

@@ -8,7 +8,6 @@ import (
type DataSourceRGUsageModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields

View File

@@ -0,0 +1,76 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
type ResourceRGModel struct {
// request fields - required
AccountID types.Int64 `tfsdk:"account_id"`
GID types.Int64 `tfsdk:"gid"`
Name types.String `tfsdk:"rg_name"`
// request fields - optional
DefNetType types.String `tfsdk:"def_net_type"`
IPCIDR types.String `tfsdk:"ipcidr"`
ExtNetID types.Int64 `tfsdk:"ext_net_id"`
ExtIP types.String `tfsdk:"ext_ip"`
Owner types.String `tfsdk:"owner"`
ResourceLimits types.Object `tfsdk:"resource_limits"`
CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
Access types.List `tfsdk:"access"`
DefNet types.Object `tfsdk:"def_net"`
Description types.String `tfsdk:"description"`
Force types.Bool `tfsdk:"force"`
Permanently types.Bool `tfsdk:"permanently"`
RegisterComputes types.Bool `tfsdk:"register_computes"`
Restore types.Bool `tfsdk:"restore"`
Enable types.Bool `tfsdk:"enable"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
RGID types.Int64 `tfsdk:"rg_id"`
LastUpdated types.String `tfsdk:"last_updated"`
AccountName types.String `tfsdk:"account_name"`
ACL types.List `tfsdk:"acl"`
ComputeFeatures types.List `tfsdk:"compute_features"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DefNetID types.Int64 `tfsdk:"def_net_id"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
GUID types.Int64 `tfsdk:"guid"`
Id types.String `tfsdk:"id"`
LockStatus types.String `tfsdk:"lock_status"`
Milestones types.Int64 `tfsdk:"milestones"`
Secret types.String `tfsdk:"secret"`
Status types.String `tfsdk:"status"`
UniqPools types.List `tfsdk:"uniq_pools"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
ResourceTypes types.List `tfsdk:"resource_types"`
VINS types.List `tfsdk:"vins"`
VMS types.List `tfsdk:"vms"`
}
type AccessModel struct {
User types.String `tfsdk:"user"`
Right types.String `tfsdk:"right"`
}
type DefNetModel struct {
NetType types.String `tfsdk:"net_type"`
NetId types.Int64 `tfsdk:"net_id"`
}
// Contains returns true if accessList contains the receiver as an element; otherwise it returns false.
func (a *AccessModel) Contains(accessList []AccessModel) bool {
for _, accessElem := range accessList {
if a.User.Equal(accessElem.User) && a.Right.Equal(accessElem.Right) {
return true
}
}
return false
}
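A minimal hypothetical sketch of Contains when computing access-list changes; the user name and rights are illustrative, and types is the terraform-plugin-framework types package:

	oldAccess := []AccessModel{
		{User: types.StringValue("user_1"), Right: types.StringValue("R")},
	}
	candidate := AccessModel{
		User:  types.StringValue("user_1"),
		Right: types.StringValue("RCX"),
	}
	if !candidate.Contains(oldAccess) {
		// same user but a different right: access must be re-granted
	}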

View File

@@ -0,0 +1,457 @@
package rg
import (
"context"
"fmt"
"reflect"
"strconv"
"time"
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ resource.Resource = &resourceRG{}
_ resource.ResourceWithImportState = &resourceRG{}
)
// NewResourceRG is a helper function to simplify the provider implementation.
func NewResourceRG() resource.Resource {
return &resourceRG{}
}
// resourceRG is the resource implementation.
type resourceRG struct {
client *decort.DecortClient
}
// Create creates the resource and sets the initial Terraform state.
func (r *resourceRG) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
// Get plan to create resource group
var plan models.ResourceRGModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceRG: Error receiving the plan")
return
}
tflog.Info(ctx, "Create resourceRG: got plan successfully", map[string]any{"name": plan.Name.ValueString()})
tflog.Info(ctx, "Create resourceRG: start creating", map[string]any{"name": plan.Name.ValueString()})
// Set timeouts
createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceRG: Error set timeout")
return
}
tflog.Info(ctx, "Create resourceRG: set timeouts successfully", map[string]any{
"name": plan.Name.ValueString(),
"createTimeout": createTimeout})
ctx, cancel := context.WithTimeout(ctx, createTimeout)
defer cancel()
// Check if input values are valid in the platform
tflog.Info(ctx, "Create resourceRG: starting input checks", map[string]any{"name": plan.Name.ValueString()})
resp.Diagnostics.Append(resourceRgInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Create resourceRG: Error input checks")
return
}
tflog.Info(ctx, "Create resourceRG: input checks successful", map[string]any{"name": plan.Name.ValueString()})
// Make create request and get response
createReq, diags := utilities.CreateRequestResourceRG(ctx, &plan)
resp.Diagnostics.Append(diags...)
if diags.HasError() {
tflog.Error(ctx, "Create resourceRG: Error response for create request of resource rg")
return
}
tflog.Info(ctx, "Create resourceRG: before call CloudBroker().RG().Create", map[string]any{"req": createReq})
rgId, err := r.client.CloudBroker().RG().Create(ctx, createReq)
if err != nil {
resp.Diagnostics.AddError(
"Create resourceRG: unable to Create RG",
err.Error(),
)
return
}
plan.Id = types.StringValue(strconv.Itoa(int(rgId)))
tflog.Info(ctx, "Create resourceRG: resource group created", map[string]any{"rgId": rgId, "name": plan.Name.ValueString()})
// additional settings after rg creation: in case of failures, warnings are added to resp.Diagnostics,
// because additional settings failure is not critical. If errors were added instead of warnings, terraform
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
// update compute features if needed, warnings added to resp.Diagnostics in case of failure
if !plan.ComputeFeatures.IsUnknown() { // ComputeFeatures is optional && computed
diags := utilities.UpdateComputeFeature(ctx, rgId, &plan, r.client)
for _, d := range diags {
if d.Severity() == diag.SeverityError {
resp.Diagnostics.AddWarning(d.Summary(), d.Detail())
}
}
}
// grant access to resource group if needed, warnings added to resp.Diagnostics in case of failure.
resp.Diagnostics.Append(utilities.AccessCreateRG(ctx, rgId, &plan, r.client)...)
// set def_net for resource group if needed, warnings added to resp.Diagnostics in case of failure.
resp.Diagnostics.Append(utilities.SetDefNetCreateRG(ctx, rgId, &plan, r.client)...)
// set compute allocation parameter if needed, warnings added to resp.Diagnostics in case of failure
if !plan.CPUAllocationParameter.IsUnknown() { // CPUAllocationParameter is optional && computed
diags := utilities.UpdateCpuAllocationParameter(ctx, rgId, &plan, r.client)
for _, d := range diags {
if d.Severity() == diag.SeverityError {
resp.Diagnostics.AddWarning(d.Summary(), d.Detail())
}
}
}
// set compute allocation ratio if needed, warnings added to resp.Diagnostics in case of failure
if !plan.CPUAllocationRatio.IsUnknown() { // CPUAllocationRatio is optional && computed
diags := utilities.UpdateCpuAllocationRatio(ctx, rgId, &plan, r.client)
for _, d := range diags {
if d.Severity() == diag.SeverityError {
resp.Diagnostics.AddWarning(d.Summary(), d.Detail())
}
}
}
// enable/disable of resource group after creation, warnings added to resp.Diagnostics in case of failure.
resp.Diagnostics.Append(utilities.EnableDisableCreateRG(ctx, rgId, &plan, r.client)...)
tflog.Info(ctx, "Create resourceRG: resource creation is completed", map[string]any{"rg_id": rgId})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.RGResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
if resp.Diagnostics.HasError() {
return
}
}
// Read refreshes the Terraform state with the latest data.
func (r *resourceRG) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Get current state
var state models.ResourceRGModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceRG: Error get state")
return
}
tflog.Info(ctx, "Read resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
// Set timeouts
readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceRG: Error set timeout")
return
}
tflog.Info(ctx, "Read resourceRG: set timeouts successfully", map[string]any{
"rg_id": state.Id.ValueString(),
"readTimeout": readTimeout})
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
// read status
resp.Diagnostics.Append(utilities.RGReadStatus(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceRG: Error reading resource group status")
return
}
// Overwrite items with refreshed state
resp.Diagnostics.Append(flattens.RGResource(ctx, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceRG: Error flatten resource group")
return
}
// Set refreshed state
resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Read resourceRG: Error set state")
return
}
tflog.Info(ctx, "End read resource group")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceRG) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Retrieve values from plan
var plan models.ResourceRGModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error receiving the plan")
return
}
tflog.Info(ctx, "Update resourceRG: got plan successfully", map[string]any{"rg_id": plan.Id.ValueString()})
// Retrieve values from state
var state models.ResourceRGModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error receiving the state")
return
}
tflog.Info(ctx, "Update resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
// Set timeouts
updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout600s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Error set timeout")
return
}
tflog.Info(ctx, "Update resourceRG: set timeouts successfully", map[string]any{
"rg_id": state.Id.ValueString(),
"updateTimeout": updateTimeout})
ctx, cancel := context.WithTimeout(ctx, updateTimeout)
defer cancel()
// Checking for values in the platform
tflog.Info(ctx, "Update resourceRG: starting input checks", map[string]any{"rg_id": plan.Id.ValueString()})
resp.Diagnostics.Append(resourceRgInputChecks(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error input checks")
return
}
tflog.Info(ctx, "Update resourceRG: input checks successful", map[string]any{"rg_id": state.Id.ValueString()})
rgId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
resp.Diagnostics.AddError("Update resourceRG: Cannot parse resource group ID from state", err.Error())
return
}
// Get current resource group values
recordRG, err := utilities.RGCheckPresence(ctx, rgId, r.client)
if err != nil {
resp.Diagnostics.AddError(
"Update resourceRG: unable to Update RG after input checks",
err.Error(),
)
return
}
tflog.Info(ctx, "Update resourceRG: check status for RG", map[string]any{"rg_id": recordRG.ID, "status": recordRG.Status})
// Validate if changes in plan are allowed
tflog.Info(ctx, "Update resourceRG: checking def_net is not empty in case of change", map[string]any{
"rg_id": state.Id.ValueString()})
if !state.DefNet.IsNull() && plan.DefNet.IsNull() {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided",
fmt.Sprintf("block def_net must not be empty for resource with rg_id %d", recordRG.ID),
)
return
}
tflog.Info(ctx, "Update resourceRG: checking def_net_type, ipcidr, ext_ip are not changed", map[string]any{
"rg_id": state.Id.ValueString(),
"def_net_type_plan": plan.DefNetType.ValueString(),
"def_net_type_state": state.DefNetType.ValueString(),
"ipcidr_plan": plan.IPCIDR.ValueString(),
"ipcidr_state": state.IPCIDR.ValueString(),
"ext_ip_plan": plan.ExtIP.ValueString(),
"ext_ip_state": state.ExtIP.ValueString(),
})
if !plan.DefNetType.Equal(state.DefNetType) {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided. Warning can be ignored if resource was imported.",
fmt.Sprintf("block def_net_type must not be changed for resource with rg_id %d", recordRG.ID),
)
return
}
if !plan.IPCIDR.Equal(state.IPCIDR) {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided",
fmt.Sprintf("block ipcidr must not be changed for resource with rg_id %d", recordRG.ID),
)
return
}
if !plan.ExtIP.Equal(state.ExtIP) {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided",
fmt.Sprintf("block ext_ip must not be changed for resource with rg_id %d", recordRG.ID),
)
return
}
if !plan.ExtNetID.Equal(state.ExtNetID) {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided",
fmt.Sprintf("block ext_net_id must not be changed for resource with rg_id %d", recordRG.ID),
)
return
}
// update RG if any of the fields name, description, register_computes or quota has been changed
resp.Diagnostics.Append(utilities.UpdateRG(ctx, rgId, &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error updating rg")
return
}
// grant/revoke access for RG
if !reflect.DeepEqual(plan.Access, state.Access) {
resp.Diagnostics.Append(utilities.AccessUpdateRG(ctx, rgId, &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error grant/revoke access for rg")
return
}
}
// set new def_net if needed
if !reflect.DeepEqual(plan.DefNet, state.DefNet) {
resp.Diagnostics.Append(utilities.SetDefNetUpdateRG(ctx, rgId, &plan, &state, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Unable to setDefNet for RG")
return
}
}
// set new cpu allocation parameter if needed
if !plan.CPUAllocationParameter.IsUnknown() && !plan.CPUAllocationParameter.Equal(state.CPUAllocationParameter) {
resp.Diagnostics.Append(utilities.UpdateCpuAllocationParameter(ctx, rgId, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Unable to setDefNet for RG")
return
}
}
// set new cpu allocation ratio if needed
if !plan.CPUAllocationRatio.IsUnknown() && !plan.CPUAllocationRatio.Equal(state.CPUAllocationRatio) {
resp.Diagnostics.Append(utilities.UpdateCpuAllocationRatio(ctx, rgId, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Unable to setDefNet for RG")
return
}
}
// enable or disable RG
if !plan.Enable.Equal(state.Enable) {
resp.Diagnostics.Append(utilities.EnableDisableUpdateRG(ctx, rgId, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceRG: Error enable/disable rg")
return
}
}
tflog.Info(ctx, "Update resourceRG: resource update is completed", map[string]any{"rg_id": plan.Id.ValueString()})
// Map response body to schema and populate Computed attribute values
resp.Diagnostics.Append(flattens.RGResource(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
return
}
// Set data last update
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
// Set state to fully populated data
diags = resp.State.Set(ctx, plan)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceRG) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Get current state
var state models.ResourceRGModel
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceRG: Error get state")
return
}
tflog.Info(ctx, "Delete resourceRG: got state successfully", map[string]any{"rg_id": state.Id.ValueString()})
// Set timeouts
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Delete resourceRG: Error set timeout")
return
}
tflog.Info(ctx, "Delete resourceRG: set timeouts successfully", map[string]any{
"rg_id": state.Id.ValueString(),
"deleteTimeout": deleteTimeout})
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
// Delete existing resource group
delReq := rg.DeleteRequest{
RGID: uint64(state.RGID.ValueInt64()),
Force: state.Force.ValueBool(),
Permanently: state.Permanently.ValueBool(),
}
_, err := r.client.CloudAPI().RG().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceRG: Error deleting resource group with error: ", err.Error())
return
}
tflog.Info(ctx, "End delete resource group", map[string]any{"rg_id": state.Id.ValueString()})
}
// Schema defines the schema for the resource.
func (r *resourceRG) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: schemas.MakeSchemaResourceRG(),
Blocks: map[string]schema.Block{
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
},
}
}
// Metadata returns the resource type name.
func (r *resourceRG) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_cb_rg"
}
// Configure adds the provider configured client to the resource.
func (r *resourceRG) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
tflog.Info(ctx, "Get Configure resourceRG")
r.client = client.Resource(ctx, &req, resp)
tflog.Info(ctx, "Getting Configure resourceRG successfully")
}
func (r *resourceRG) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
// Retrieve import ID and save to id attribute
resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

View File

@@ -13,12 +13,6 @@ func MakeSchemaDataSourceRG() map[string]schema.Attribute {
Description: "resource group id",
},
// optional attributes
"reason": schema.StringAttribute{
Optional: true,
Description: "reason for request",
},
//computed attributes
"account_id": schema.Int64Attribute{
Computed: true,

Some files were not shown because too many files have changed in this diff