This commit is contained in:
asteam
2024-12-04 13:18:58 +03:00
parent 003e4d656e
commit 76ea459b3d
417 changed files with 30051 additions and 975 deletions

View File

@@ -3,8 +3,10 @@ package provider
import (
"github.com/hashicorp/terraform-plugin-framework/datasource"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/extnet"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/image"
@@ -17,6 +19,8 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
cbAudit "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit"
cbDisks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks"
cbK8ci "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci"
cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
cbNode "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node"
cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg"
@@ -43,6 +47,8 @@ func newDataSourcesMap() []func() datasource.DataSource {
account.NewDataSourceAccountReservedUnits,
account.NewDataSourceAccountGetResourceConsumptionList,
audit.NewDataSourceAudit,
bservice.NewDataSourceBService,
bservice.NewDataSourceBServiceList,
bservice.NewDataSourceBServiceGroup,
@@ -59,6 +65,9 @@ func newDataSourcesMap() []func() datasource.DataSource {
disks.NewDataSourceDiskSnapshotList,
disks.NewDataSourceDiskReplication,
dpdknet.NewDataSourceDPDK,
dpdknet.NewDataSourceDPDKList,
extnet.NewDataSourceExtNet,
extnet.NewDataSourceExtNetComputesList,
extnet.NewDataSourceExtNetDefault,
@@ -147,9 +156,23 @@ func newDataSourcesMap() []func() datasource.DataSource {
cbAudit.NewDataSourceAuditLinkedJobs,
cbAudit.NewDataSourceAuditList,
cbDisks.NewDataSourceDiskListDeleted,
cbDisks.NewDataSourceDiskListTypesDetailed,
cbDisks.NewDataSourceDiskListTypes,
cbDisks.NewDataSourceDiskListUnattached,
cbDisks.NewdataSourceDiskList,
cbDisks.NewDataSourceDiskReplication,
cbDisks.NewDataSourceDiskSnapshotList,
cbDisks.NewDataSourceDiskSnapshot,
cbDisks.NewDataSourceDisk,
cbNode.NewDataSourceNode,
cbNode.NewDataSourceNodeList,
cbK8ci.NewDataSourceK8CI,
cbK8ci.NewDataSourceK8CIList,
cbK8ci.NewDataSourceK8CIListDeleted,
cbLb.NewDataSourceLB,
cbLb.NewDataSourceLBList,
cbLb.NewDataSourceLBListDeleted,

View File

@@ -142,7 +142,7 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR
return
}
// Set up default values, values from env and save user provided values into decortConfig
// Set up default values, values from env and save user provided values into dynamixConfig
dynamixConfig := dynamixProviderConfig{}
dynamixConfig.new(config)

View File

@@ -12,8 +12,11 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
cbDisks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks"
cbK8CI "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci"
cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg"
cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool"
cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins"
)
@@ -43,11 +46,18 @@ func newResourcesMap() []func() resource.Resource {
cbAccount.NewResourceAccount,
cbDisks.NewResourceDiskReplications,
cbDisks.NewResourceDiskSnapshot,
cbDisks.NewResourceDisk,
cbK8CI.NewResourceK8Ci,
cbLb.NewResourceLB,
cbLb.NewResourceLBBackend,
cbLb.NewResourceLBBackendServer,
cbLb.NewResourceLBFrontend,
cbLb.NewResourceLBFrontendBind,
cbRG.NewResourceRG,
cbVFpool.NewResourceVFPool,
cbVins.NewResourceVINS,
cbVins.NewResourceVINSStaticRoute,

View File

@@ -37,6 +37,7 @@ func AccountFlipgroupsListDataSource(ctx context.Context, state *models.DataSour
ExtNetID: state.ExtNetID,
ByIP: state.ByIP,
FLIPGroupID: state.FLIPGroupID,
SortBy: state.SortBy,
Page: state.Page,
Size: state.Size,
Timeouts: state.Timeouts,

View File

@@ -14,6 +14,7 @@ type DataSourceAccountFlipgroupsListModel struct {
ExtNetID types.Int64 `tfsdk:"extnet_id"`
ByIP types.String `tfsdk:"by_ip"`
FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"`
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

View File

@@ -50,9 +50,8 @@ type ResourceAccountModel struct {
}
type UsersModel struct {
UserID types.String `tfsdk:"user_id"`
AccessType types.String `tfsdk:"access_type"`
RecursiveDelete types.Bool `tfsdk:"recursive_delete"`
UserID types.String `tfsdk:"user_id"`
AccessType types.String `tfsdk:"access_type"`
}
type ResourceLimitsInAccountResourceModel struct {

View File

@@ -37,6 +37,10 @@ func MakeSchemaDataSourceAccountFlipgroupsList() map[string]schema.Attribute {
Optional: true,
Description: "find by flipgroup id",
},
"sort_by": schema.StringAttribute{
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": schema.Int64Attribute{
Optional: true,
Description: "page number",

View File

@@ -38,10 +38,6 @@ func MakeSchemaResourceAccount() map[string]schema.Attribute {
"access_type": schema.StringAttribute{
Required: true,
},
"recursive_delete": schema.BoolAttribute{
Optional: true,
// default is false
},
},
},
},

View File

@@ -33,6 +33,9 @@ func AccountFlipgroupsListCheckPresence(ctx context.Context, plan *models.DataSo
if !plan.FLIPGroupID.IsNull() {
flipgroupsListReq.FLIPGroupID = uint64(plan.FLIPGroupID.ValueInt64())
}
if !plan.SortBy.IsNull() {
flipgroupsListReq.SortBy = plan.SortBy.ValueString()
}
if !plan.Page.IsNull() {
flipgroupsListReq.Page = uint64(plan.Page.ValueInt64())
}

View File

@@ -298,9 +298,8 @@ func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *m
for _, user := range deletedUsers {
delUserReq := account.DeleteUserRequest{
AccountID: accountId,
UserID: user.UserID.ValueString(),
RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false
AccountID: accountId,
UserID: user.UserID.ValueString(),
}
tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudAPI().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq})

View File

@@ -0,0 +1,91 @@
package audit
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time check: dataSourceAudit must implement datasource.DataSource.
var (
	_ datasource.DataSource = &dataSourceAudit{}
)
// NewDataSourceAudit returns a new instance of the audit data source.
func NewDataSourceAudit() datasource.DataSource {
	var ds dataSourceAudit
	return &ds
}
// dataSourceAudit is the data source implementation.
type dataSourceAudit struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}
// Read refreshes the Terraform state with the audit record identified by
// the configured audit_guid.
func (d *dataSourceAudit) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceAudit
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAudit: Error get state")
		return
	}
	auditGuid := state.AuditGuid.ValueString()
	tflog.Info(ctx, "Read dataSourceAudit: got state successfully", map[string]any{"audit_guid": auditGuid})

	// Set timeouts (falls back to constants.Timeout180s when unset)
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAudit: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceAudit: set timeouts successfully", map[string]any{
		"audit_guid":  auditGuid,
		"readTimeout": readTimeout})
	// Bound the API call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.AuditDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAudit: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAudit: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceAudit", map[string]any{"audit_guid": auditGuid})
}
// Schema defines the attribute and block schema for the audit data source.
func (d *dataSourceAudit) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	blocks := map[string]schema.Block{
		"timeouts": timeouts.Block(ctx),
	}
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceAudit(),
		Blocks:     blocks,
	}
}
// Metadata sets the data source type name by appending "_audit" to the
// provider type name.
func (d *dataSourceAudit) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_audit"
}
// Configure adds the provider configured client to the data source.
// NOTE(review): assumes client.DataSource reports failures through resp — confirm.
func (d *dataSourceAudit) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceAudit")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceAudit successfully")
}

View File

@@ -0,0 +1,49 @@
package flattens
import (
"context"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/utilities"
)
// AuditDataSource fetches the audit record identified by state.AuditGuid and
// flattens the SDK response into the Terraform state model. Returns non-empty
// diagnostics when the API lookup fails.
func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.AuditDataSource")

	auditGuid := state.AuditGuid.ValueString()

	// Fetch the record; diags carries any API error back to the caller.
	// (Removed the dead `diags := diag.Diagnostics{}` that was immediately
	// overwritten here — staticcheck SA4006.)
	recordAudit, diags := utilities.AuditDataSourceCheckPresence(ctx, auditGuid, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.AuditDataSource: before flatten", map[string]any{"audit_guid": auditGuid})

	*state = models.DataSourceAudit{
		AuditGuid: state.AuditGuid,
		Timeouts:  state.Timeouts,
		Arguments: types.StringValue(recordAudit.Arguments),
		Call:      types.StringValue(recordAudit.Call),
		GUID:      types.StringValue(recordAudit.GUID),
		// NOTE(review): id mirrors guid — the record exposes no separate ID; confirm intended.
		ID:           types.StringValue(recordAudit.GUID),
		Kwargs:       types.StringValue(recordAudit.Kwargs),
		RemoteAddr:   types.StringValue(recordAudit.RemoteAddr),
		ResponseTime: types.Float64Value(recordAudit.ResponseTime),
		Result:       types.StringValue(recordAudit.Result),
		StatusCode:   types.Int64Value(int64(recordAudit.StatusCode)),
		Tags:         types.StringValue(recordAudit.Tags),
		Timestamp:    types.Float64Value(recordAudit.Timestamp),
		TimestampEnd: types.Float64Value(recordAudit.TimestampEnd),
		User:         types.StringValue(recordAudit.User),
	}

	tflog.Info(ctx, "End flattens.AuditDataSource", map[string]any{"audit_guid": auditGuid})
	return nil
}

View File

@@ -0,0 +1,27 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceAudit is the Terraform state model for the audit data source.
// Tag names must match the attribute keys declared in
// schemas.MakeSchemaDataSourceAudit.
type DataSourceAudit struct {
	// required fields
	AuditGuid types.String   `tfsdk:"audit_guid"`
	Timeouts  timeouts.Value `tfsdk:"timeouts"`

	// response fields (computed from the API record)
	Arguments    types.String  `tfsdk:"args"`
	Call         types.String  `tfsdk:"call"`
	GUID         types.String  `tfsdk:"guid"`
	ID           types.String  `tfsdk:"id"`
	Kwargs       types.String  `tfsdk:"kwargs"`
	RemoteAddr   types.String  `tfsdk:"remote_addr"`
	ResponseTime types.Float64 `tfsdk:"responsetime"`
	Result       types.String  `tfsdk:"result"`
	StatusCode   types.Int64   `tfsdk:"status_code"`
	Tags         types.String  `tfsdk:"tags"`
	Timestamp    types.Float64 `tfsdk:"timestamp"`
	TimestampEnd types.Float64 `tfsdk:"timestamp_end"`
	User         types.String  `tfsdk:"user"`
}

View File

@@ -0,0 +1,52 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// MakeSchemaDataSourceAudit returns the attribute schema for the audit data
// source. "audit_guid" is the only required input; every other attribute is
// computed from the API response.
func MakeSchemaDataSourceAudit() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"audit_guid": schema.StringAttribute{
			Required: true,
		},
		// computed attributes
		"args": schema.StringAttribute{
			Computed: true,
		},
		"call": schema.StringAttribute{
			Computed: true,
		},
		"guid": schema.StringAttribute{
			Computed: true,
		},
		"kwargs": schema.StringAttribute{
			Computed: true,
		},
		"remote_addr": schema.StringAttribute{
			Computed: true,
		},
		"responsetime": schema.Float64Attribute{
			Computed: true,
		},
		"result": schema.StringAttribute{
			Computed: true,
		},
		"status_code": schema.Int64Attribute{
			Computed: true,
		},
		"tags": schema.StringAttribute{
			Computed: true,
		},
		"timestamp": schema.Float64Attribute{
			Computed: true,
		},
		"timestamp_end": schema.Float64Attribute{
			Computed: true,
		},
		"user": schema.StringAttribute{
			Computed: true,
		},
		"id": schema.StringAttribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,27 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
)
// AuditDataSourceCheckPresence retrieves the audit record with the given GUID
// through the CloudAPI endpoint. It returns the record, or diagnostics
// describing the API error.
func AuditDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.RecordAudit, diag.Diagnostics) {
	tflog.Info(ctx, fmt.Sprintf("AuditDataSourceCheckPresence: Get info about audit with ID - %v", auditGuid))

	diags := diag.Diagnostics{}

	recordAudit, err := c.CloudAPI().Audit().Get(ctx, audit.GetRequest{AuditGuid: auditGuid})
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about audit with ID %v", auditGuid), err.Error())
		return nil, diags
	}

	// Log message fixed: the call above goes through CloudAPI(), not CloudBroker().
	tflog.Info(ctx, "AuditDataSourceCheckPresence: response from CloudAPI().Audit().Get", map[string]any{"audit_guid": auditGuid, "response": recordAudit})
	return recordAudit, nil
}

View File

@@ -52,7 +52,6 @@ func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort
Type: plan.Type,
Detach: plan.Detach,
Permanently: plan.Permanently,
Reason: plan.Reason,
Shareable: plan.Shareable,
Timeouts: plan.Timeouts,

View File

@@ -45,7 +45,6 @@ func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDi
Start: state.Start,
Detach: state.Detach,
Permanently: state.Permanently,
Reason: state.Reason,
Id: types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
ACL: types.StringValue(string(diskAcl)),
AccountID: types.Int64Value(int64(recordDisk.AccountID)),

View File

@@ -20,7 +20,6 @@ type ResourceDiskModel struct {
Type types.String `tfsdk:"type"`
Detach types.Bool `tfsdk:"detach"`
Permanently types.Bool `tfsdk:"permanently"`
Reason types.String `tfsdk:"reason"`
Shareable types.Bool `tfsdk:"shareable"`
IOTune types.Object `tfsdk:"iotune"`
Timeouts timeouts.Value `tfsdk:"timeouts"`

View File

@@ -17,7 +17,6 @@ type ResourceRecordDiskReplicationModel struct {
Start types.Bool `tfsdk:"start"`
Detach types.Bool `tfsdk:"detach"`
Permanently types.Bool `tfsdk:"permanently"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields

View File

@@ -312,10 +312,6 @@ func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, r
Permanently: state.Permanently.ValueBool(), // default false
}
if !state.Reason.IsNull() {
delReq.Reason = state.Reason.ValueString()
}
tflog.Info(ctx, "Delete resourceDisk: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq})
_, err := r.client.CloudAPI().Disks().Delete(ctx, delReq)
if err != nil {

View File

@@ -284,10 +284,6 @@ func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.Delet
Permanently: state.Permanently.ValueBool(), // default false
}
if !state.Reason.IsNull() {
delReq.Reason = state.Reason.ValueString()
}
tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq})
_, err := r.client.CloudAPI().Disks().Delete(ctx, delReq)
if err != nil {

View File

@@ -64,10 +64,6 @@ func MakeSchemaResourceDisk() map[string]schema.Attribute {
Description: "Whether to completely delete the disk, works only with non attached disks",
// default is false
},
"reason": schema.StringAttribute{
Optional: true,
Description: "Reason for deletion",
},
"shareable": schema.BoolAttribute{
Optional: true,
Computed: true,

View File

@@ -39,9 +39,6 @@ func MakeSchemaResourceDiskReplication() map[string]schema.Attribute {
"permanently": schema.BoolAttribute{
Optional: true,
},
"reason": schema.StringAttribute{
Optional: true,
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,

View File

@@ -193,7 +193,6 @@ func RestoreDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) dia
restoreReq := disks.RestoreRequest{
DiskID: diskId,
Reason: "Terraform automatic restore",
}
tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})

View File

@@ -0,0 +1,91 @@
package dpdknet
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time check: dataSourceDPDK must implement datasource.DataSource.
var (
	_ datasource.DataSource = &dataSourceDPDK{}
)
// NewDataSourceDPDK returns a new instance of the DPDK network data source.
func NewDataSourceDPDK() datasource.DataSource {
	var ds dataSourceDPDK
	return &ds
}
// dataSourceDPDK is the data source implementation.
type dataSourceDPDK struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}
// Read refreshes the Terraform state with the DPDK network identified by the
// configured dpdk_id.
func (d *dataSourceDPDK) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDPDKModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDK: Error get state")
		return
	}
	dpdkId := uint64(state.DPDKID.ValueInt64())
	tflog.Info(ctx, "Read dataSourceDPDK: got state successfully", map[string]any{"dpdk_id": dpdkId})

	// Set timeouts (falls back to constants.Timeout180s when unset)
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDK: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDPDK: set timeouts successfully", map[string]any{
		"dpdk_id":     dpdkId,
		"readTimeout": readTimeout})
	// Bound the API call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DPDKDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDK: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDK: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDPDK", map[string]any{"dpdk_id": dpdkId})
}
// Schema defines the attribute and block schema for the DPDK data source.
func (d *dataSourceDPDK) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDPDK(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}
// Metadata sets the data source type name by appending "_dpdknet" to the
// provider type name.
func (d *dataSourceDPDK) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_dpdknet"
}
// Configure adds the provider configured client to the data source.
// NOTE(review): assumes client.DataSource reports failures through resp — confirm.
func (d *dataSourceDPDK) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDPDK")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDPDK successfully")
}

View File

@@ -0,0 +1,88 @@
package dpdknet
import (
"context"
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/schemas"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time check: dataSourceDPDKList must implement datasource.DataSource.
var (
	_ datasource.DataSource = &dataSourceDPDKList{}
)
// NewDataSourceDPDKList returns a new instance of the DPDK network list data source.
func NewDataSourceDPDKList() datasource.DataSource {
	var ds dataSourceDPDKList
	return &ds
}
// dataSourceDPDKList is the data source implementation.
type dataSourceDPDKList struct {
	// client is the DECORT API client injected by Configure.
	client *decort.DecortClient
}
// Read refreshes the Terraform state with the current list of DPDK networks,
// applying any configured filters and paging.
func (d *dataSourceDPDKList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	// Read Terraform configuration data into the model
	var state models.DataSourceDPDKListModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDKList: Error get state")
		return
	}

	// Set timeouts (falls back to constants.Timeout180s when unset)
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDKList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceDPDKList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})
	// Bound the API call by the configured read timeout.
	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.DPDKListDataSource(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDKList: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceDPDKList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceDPDKList")
}
// Schema defines the attribute and block schema for the DPDK list data source.
func (d *dataSourceDPDKList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaDataSourceDPDKList(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}
// Metadata sets the data source type name by appending "_dpdknet_list" to the
// provider type name.
func (d *dataSourceDPDKList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_dpdknet_list"
}
// Configure adds the provider configured client to the data source.
// NOTE(review): assumes client.DataSource reports failures through resp — confirm.
func (d *dataSourceDPDKList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	tflog.Info(ctx, "Get Configure dataSourceDPDKList")
	d.client = client.DataSource(ctx, &req, resp)
	tflog.Info(ctx, "Getting Configure dataSourceDPDKList successfully")
}

View File

@@ -0,0 +1,52 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/utilities"
)
// DPDKDataSource fetches the DPDK network identified by state.DPDKID and
// flattens the SDK response into the Terraform state model. Returns non-empty
// diagnostics when the API lookup fails.
func DPDKDataSource(ctx context.Context, state *models.DataSourceDPDKModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DPDKDataSource")

	dpdkId := uint64(state.DPDKID.ValueInt64())

	// Fetch the record; diags carries any API error back to the caller.
	// (Removed the dead `diags := diag.Diagnostics{}` that was immediately
	// overwritten here — staticcheck SA4006.)
	recordDPDK, diags := utilities.DPDKDataSourceCheckPresence(ctx, dpdkId, c)
	if diags.HasError() {
		return diags
	}

	tflog.Info(ctx, "flattens.DPDKDataSource: before flatten", map[string]any{"dpdk_id": dpdkId})

	// Synthetic state id: a fresh UUID is generated on every read.
	id := uuid.New()
	*state = models.DataSourceDPDKModel{
		DPDKID:        state.DPDKID,
		Timeouts:      state.Timeouts,
		Id:            types.StringValue(id.String()),
		AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.AccountAccess),
		CreatedTime:   types.Int64Value(int64(recordDPDK.CreatedTime)),
		Desc:          types.StringValue(recordDPDK.Description),
		GID:           types.Int64Value(int64(recordDPDK.GID)),
		GUID:          types.Int64Value(int64(recordDPDK.GUID)),
		Name:          types.StringValue(recordDPDK.Name),
		RGAccess:      flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.RGAccess),
		Status:        types.StringValue(recordDPDK.Status),
		OVSBridge:     types.StringValue(recordDPDK.OVSBridge),
		VlanID:        types.Int64Value(int64(recordDPDK.VlanID)),
		ComputeIDs:    flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.ComputeIDs),
		UpdatedTime:   types.Int64Value(int64(recordDPDK.UpdatedTime)),
	}

	tflog.Info(ctx, "End flattens.DPDKDataSource", map[string]any{"dpdk_id": dpdkId})
	return nil
}

View File

@@ -0,0 +1,72 @@
package flattens
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/utilities"
)
// DPDKListDataSource fetches the filtered, paginated list of DPDK networks
// and flattens it into the Terraform state model. Returns non-empty
// diagnostics when the API lookup fails.
func DPDKListDataSource(ctx context.Context, state *models.DataSourceDPDKListModel, c *decort.DecortClient) diag.Diagnostics {
	tflog.Info(ctx, "Start flattens.DPDKListDataSource")
	diags := diag.Diagnostics{}
	dpdkList, err := utilities.DPDKListDataSourceCheckPresence(ctx, state, c)
	if err != nil {
		diags.AddError("Cannot get info about list DPDK", err.Error())
		return diags
	}
	tflog.Info(ctx, "flattens.DPDKListDataSource: before flatten")
	// Synthetic state id: a fresh UUID is generated on every read.
	id := uuid.New()
	// Preserve the user-supplied filter/paging fields; computed fields are
	// repopulated below.
	*state = models.DataSourceDPDKListModel{
		ByID:       state.ByID,
		GID:        state.GID,
		Name:       state.Name,
		Desc:       state.Desc,
		Status:     state.Status,
		ComputeIDs: state.ComputeIDs,
		SortBy:     state.SortBy,
		Page:       state.Page,
		Size:       state.Size,
		Timeouts:   state.Timeouts,
		Id:         types.StringValue(id.String()),
	}
	// Flatten every list entry into the nested items model.
	items := make([]models.ItemDPDKListModel, 0, len(dpdkList.Data))
	for _, dpdkItem := range dpdkList.Data {
		item := models.ItemDPDKListModel{
			AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.AccountAccess),
			CreatedTime:   types.Int64Value(int64(dpdkItem.CreatedTime)),
			Desc:          types.StringValue(dpdkItem.Description),
			DPDKID:        types.Int64Value(int64(dpdkItem.ID)),
			GID:           types.Int64Value(int64(dpdkItem.GID)),
			GUID:          types.Int64Value(int64(dpdkItem.GUID)),
			Name:          types.StringValue(dpdkItem.Name),
			RGAccess:      flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.RGAccess),
			Status:        types.StringValue(dpdkItem.Status),
			OVSBridge:     types.StringValue(dpdkItem.OVSBridge),
			VlanID:        types.Int64Value(int64(dpdkItem.VlanID)),
			ComputeIDs:    flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.ComputeIDs),
			UpdatedTime:   types.Int64Value(int64(dpdkItem.UpdatedTime)),
		}
		items = append(items, item)
	}
	state.Items = items
	state.EntryCount = types.Int64Value(int64(dpdkList.EntryCount))
	tflog.Info(ctx, "End flattens.DPDKListDataSource")
	return nil
}

View File

@@ -0,0 +1,27 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDPDKModel is the Terraform state model for the DPDK data source.
// Tag names must match the attribute keys declared in
// schemas.MakeSchemaDataSourceDPDK.
type DataSourceDPDKModel struct {
	// required fields
	DPDKID   types.Int64    `tfsdk:"dpdk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields (computed from the API record)
	AccountAccess types.List   `tfsdk:"account_access"`
	CreatedTime   types.Int64  `tfsdk:"created_time"`
	Desc          types.String `tfsdk:"desc"`
	GID           types.Int64  `tfsdk:"gid"`
	GUID          types.Int64  `tfsdk:"guid"`
	Id            types.String `tfsdk:"id"`
	Name          types.String `tfsdk:"name"`
	RGAccess      types.List   `tfsdk:"rg_access"`
	Status        types.String `tfsdk:"status"`
	OVSBridge     types.String `tfsdk:"ovs_bridge"`
	VlanID        types.Int64  `tfsdk:"vlan_id"`
	ComputeIDs    types.List   `tfsdk:"compute_ids"`
	UpdatedTime   types.Int64  `tfsdk:"updated_time"`
}

View File

@@ -0,0 +1,42 @@
package models
import (
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// DataSourceDPDKListModel is the Terraform state model for the DPDK list data
// source. Tag names must match the attribute keys declared in
// schemas.MakeSchemaDataSourceDPDKList.
type DataSourceDPDKListModel struct {
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// optional attributes (filters and paging supplied by the user)
	ByID       types.Int64  `tfsdk:"by_id"`
	GID        types.Int64  `tfsdk:"gid"`
	Name       types.String `tfsdk:"name"`
	Desc       types.String `tfsdk:"desc"`
	Status     types.String `tfsdk:"status"`
	ComputeIDs types.List   `tfsdk:"compute_ids"`
	SortBy     types.String `tfsdk:"sort_by"`
	Page       types.Int64  `tfsdk:"page"`
	Size       types.Int64  `tfsdk:"size"`

	// computed attributes (populated by the flatten step)
	Id         types.String        `tfsdk:"id"`
	Items      []ItemDPDKListModel `tfsdk:"items"`
	EntryCount types.Int64         `tfsdk:"entry_count"`
}
// ItemDPDKListModel is a single entry of the "items" list in
// DataSourceDPDKListModel; all fields are computed from the API response.
type ItemDPDKListModel struct {
	DPDKID        types.Int64  `tfsdk:"dpdk_id"`
	AccountAccess types.List   `tfsdk:"account_access"`
	CreatedTime   types.Int64  `tfsdk:"created_time"`
	Desc          types.String `tfsdk:"desc"`
	GID           types.Int64  `tfsdk:"gid"`
	GUID          types.Int64  `tfsdk:"guid"`
	Name          types.String `tfsdk:"name"`
	RGAccess      types.List   `tfsdk:"rg_access"`
	Status        types.String `tfsdk:"status"`
	OVSBridge     types.String `tfsdk:"ovs_bridge"`
	VlanID        types.Int64  `tfsdk:"vlan_id"`
	ComputeIDs    types.List   `tfsdk:"compute_ids"`
	UpdatedTime   types.Int64  `tfsdk:"updated_time"`
}

View File

@@ -0,0 +1,56 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDPDK returns the attribute schema of the single-DPDK
// network data source: "dpdk_id" is the required lookup key, everything else
// is computed from the API response.
func MakeSchemaDataSourceDPDK() map[string]schema.Attribute {
	attrs := make(map[string]schema.Attribute)

	// required attributes
	attrs["dpdk_id"] = schema.Int64Attribute{Required: true}

	// computed attributes
	attrs["id"] = schema.StringAttribute{Computed: true}
	attrs["account_access"] = schema.ListAttribute{Computed: true, ElementType: types.Int64Type}
	attrs["created_time"] = schema.Int64Attribute{Computed: true}
	attrs["desc"] = schema.StringAttribute{Computed: true}
	attrs["gid"] = schema.Int64Attribute{Computed: true}
	attrs["guid"] = schema.Int64Attribute{Computed: true}
	attrs["name"] = schema.StringAttribute{Computed: true}
	attrs["rg_access"] = schema.ListAttribute{Computed: true, ElementType: types.Int64Type}
	attrs["status"] = schema.StringAttribute{Computed: true}
	attrs["ovs_bridge"] = schema.StringAttribute{Computed: true}
	attrs["vlan_id"] = schema.Int64Attribute{Computed: true}
	attrs["compute_ids"] = schema.ListAttribute{Computed: true, ElementType: types.Int64Type}
	attrs["updated_time"] = schema.Int64Attribute{Computed: true}

	return attrs
}

View File

@@ -0,0 +1,97 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// MakeSchemaDataSourceDPDKList returns the attribute schema of the DPDK
// network list data source. The optional attributes are request-side filters;
// "items" and "entry_count" are computed from the API response.
func MakeSchemaDataSourceDPDKList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes (request filters)
		"by_id": schema.Int64Attribute{
			Optional: true,
		},
		"gid": schema.Int64Attribute{
			Optional: true,
		},
		"name": schema.StringAttribute{
			Optional: true,
		},
		"desc": schema.StringAttribute{
			Optional: true,
		},
		"status": schema.StringAttribute{
			Optional: true,
		},
		"compute_ids": schema.ListAttribute{
			Optional:    true,
			ElementType: types.Int64Type,
		},
		"sort_by": schema.StringAttribute{
			Optional: true,
		},
		"page": schema.Int64Attribute{
			Optional: true,
		},
		"size": schema.Int64Attribute{
			Optional: true,
		},
		// computed attributes
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					// "items" is wholly computed, so its nested attributes must be
					// Computed as well — Required would demand user input inside a
					// read-only list. (Was Required: true.)
					"dpdk_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_access": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"desc": schema.StringAttribute{
						Computed: true,
					},
					"gid": schema.Int64Attribute{
						Computed: true,
					},
					"guid": schema.Int64Attribute{
						Computed: true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"rg_access": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"ovs_bridge": schema.StringAttribute{
						Computed: true,
					},
					"vlan_id": schema.Int64Attribute{
						Computed: true,
					},
					"compute_ids": schema.ListAttribute{
						Computed:    true,
						ElementType: types.Int64Type,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}

View File

@@ -0,0 +1,27 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
)
// DPDKDataSourceCheckPresence fetches the DPDK network record with the given
// ID via CloudAPI().DPDKNet().Get. On failure it returns nil together with
// diagnostics describing the error; on success the diagnostics are nil.
func DPDKDataSourceCheckPresence(ctx context.Context, dpdkId uint64, c *decort.DecortClient) (*dpdknet.RecordDPDKNet, diag.Diagnostics) {
	tflog.Info(ctx, fmt.Sprintf("DPDKDataSourceCheckPresence: Get info about DPDK net with ID - %d", dpdkId))

	diags := diag.Diagnostics{}

	recordDPDK, err := c.CloudAPI().DPDKNet().Get(ctx, dpdknet.GetRequest{DPDKID: dpdkId})
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot get info about DPDK net with ID %d", dpdkId), err.Error())
		return nil, diags
	}

	// Fixed: the message previously said CloudBroker(), but this helper calls CloudAPI().
	tflog.Info(ctx, "DPDKDataSourceCheckPresence: response from CloudAPI().DPDKNet().Get", map[string]any{"dpdk_id": dpdkId, "response": recordDPDK})
	return recordDPDK, nil
}

View File

@@ -0,0 +1,61 @@
package utilities
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
)
// DPDKListDataSourceCheckPresence builds a dpdknet.ListRequest from the
// non-null filter fields of the plan, calls CloudAPI().DPDKNet().List and
// returns the resulting list. Returns an error if the ComputeIDs list cannot
// be converted or the API call fails.
func DPDKListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceDPDKListModel, c *decort.DecortClient) (*dpdknet.ListDPDKNet, error) {
	// Plain string: fmt.Sprintf with no format verbs was redundant (go vet S1039).
	tflog.Info(ctx, "DPDKListDataSourceCheckPresence: Get DPDK list info")

	listDPDKReq := dpdknet.ListRequest{}

	// Copy each filter into the request only when the user actually set it.
	if !plan.ByID.IsNull() {
		listDPDKReq.ByID = uint64(plan.ByID.ValueInt64())
	}
	if !plan.GID.IsNull() {
		listDPDKReq.GID = uint64(plan.GID.ValueInt64())
	}
	if !plan.Name.IsNull() {
		listDPDKReq.Name = plan.Name.ValueString()
	}
	if !plan.Desc.IsNull() {
		listDPDKReq.Description = plan.Desc.ValueString()
	}
	if !plan.Status.IsNull() {
		listDPDKReq.Status = plan.Status.ValueString()
	}
	if !plan.ComputeIDs.IsNull() {
		computeIDs := make([]uint64, 0, len(plan.ComputeIDs.Elements()))
		diags := plan.ComputeIDs.ElementsAs(ctx, &computeIDs, false)
		if diags.HasError() {
			tflog.Error(ctx, "DPDKListDataSourceCheckPresence: cannot populate computeIDs with plan.ComputeIDs List elements")
			return nil, fmt.Errorf("cannot populate computeIDs with plan.ComputeIDs List elements")
		}
		listDPDKReq.ComputeIDs = computeIDs
	}
	if !plan.SortBy.IsNull() {
		listDPDKReq.SortBy = plan.SortBy.ValueString()
	}
	if !plan.Page.IsNull() {
		listDPDKReq.Page = uint64(plan.Page.ValueInt64())
	}
	if !plan.Size.IsNull() {
		listDPDKReq.Size = uint64(plan.Size.ValueInt64())
	}

	tflog.Info(ctx, "DPDKListDataSourceCheckPresence: before call CloudAPI().DPDKNet().List", map[string]any{"response": listDPDKReq})

	dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, listDPDKReq)
	if err != nil {
		return nil, fmt.Errorf("cannot get info about data source list DPDK net with error: %w", err)
	}

	tflog.Info(ctx, "DPDKListDataSourceCheckPresence: response from CloudAPI().DPDKNet().List", map[string]any{"response": dpdkList})
	// Return an explicit nil error instead of the stale (known-nil) err variable.
	return dpdkList, nil
}

View File

@@ -67,7 +67,8 @@ func FlipgroupListDataSourceCheckPresence(ctx context.Context, plan *models.Data
for _, val := range plan.CliendIDs.Elements() {
result = append(result, uint64(val.(types.Int64).ValueInt64()))
}
listFlipgroupReq.ClientIDs = result
//TODO fix it
//listFlipgroupReq.ClientIDs = result
}
if !plan.Status.IsNull() {

View File

@@ -9,6 +9,7 @@ import (
account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet"
image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8ci"
@@ -448,6 +449,30 @@ func ExistVFPool(ctx context.Context, vfpoolId uint64, c *decort.DecortClient) e
return nil
}
// ExistDPDK checks that the DPDK network with the given ID exists and is in
// the ENABLED state; otherwise it returns a descriptive error.
func ExistDPDK(ctx context.Context, dpdkId uint64, c *decort.DecortClient) error {
	listReq := dpdknet.ListRequest{ByID: dpdkId}

	resp, err := c.CloudAPI().DPDKNet().List(ctx, listReq)
	if err != nil {
		return err
	}

	// An empty result means no network matched the requested ID.
	if len(resp.Data) == 0 {
		return fmt.Errorf("DPDK net with ID %v not found", dpdkId)
	}

	// Every returned entry (the ByID filter yields at most one) must be enabled.
	for i := range resp.Data {
		if resp.Data[i].Status != "ENABLED" {
			return fmt.Errorf("DPDK net with ID %v must be enabled", dpdkId)
		}
	}

	return nil
}
func ExistSnapshotInCompute(ctx context.Context, computeID uint64, label string, c *decort.DecortClient) error {
req := compute.SnapshotListRequest{
ComputeID: computeID,

View File

@@ -42,6 +42,7 @@ func DataSourceImageList(ctx context.Context, state *models.ListImagesModel, c *
Bootable: state.Bootable,
SortBy: state.SortBy,
Page: state.Page,
Enabled: state.Enabled,
Size: state.Size,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),

View File

@@ -48,7 +48,6 @@ func ResourceImage(ctx context.Context, plan *models.ImageResourceModel, c *deco
SepID: types.Int64Value(int64(image.SepID)),
PoolName: types.StringValue(image.Pool),
Architecture: types.StringValue(image.Architecture),
Permanently: plan.Permanently,
ImageId: types.Int64Value(int64(image.ID)),
Timeouts: plan.Timeouts,
Id: types.StringValue(strconv.Itoa(int(image.ID))),

View File

@@ -33,7 +33,6 @@ func ResourceImageVirtual(ctx context.Context, plan *models.ImageVirtualResource
*plan = models.ImageVirtualResourceModel{
ImageName: types.StringValue(image.Name),
LinkTo: types.Int64Value(int64(image.LinkTo)),
Permanently: plan.Permanently,
Timeouts: plan.Timeouts,
Id: types.StringValue(strconv.Itoa(int(image.ID))),
LastUpdated: plan.LastUpdated,

View File

@@ -22,6 +22,7 @@ type ListImagesModel struct {
SortBy types.String `tfsdk:"sort_by"`
Page types.Int64 `tfsdk:"page"`
Size types.Int64 `tfsdk:"size"`
Enabled types.Bool `tfsdk:"enabled"`
// responce fields
Id types.String `tfsdk:"id"`

View File

@@ -23,7 +23,6 @@ type ImageResourceModel struct {
SepID types.Int64 `tfsdk:"sep_id"`
PoolName types.String `tfsdk:"pool_name"`
Architecture types.String `tfsdk:"architecture"`
Permanently types.Bool `tfsdk:"permanently"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// responce fields
Id types.String `tfsdk:"id"`

View File

@@ -7,10 +7,9 @@ import (
type ImageVirtualResourceModel struct {
// request fields
ImageName types.String `tfsdk:"image_name"`
LinkTo types.Int64 `tfsdk:"link_to"`
Permanently types.Bool `tfsdk:"permanently"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
ImageName types.String `tfsdk:"image_name"`
LinkTo types.Int64 `tfsdk:"link_to"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// responce fields
Id types.String `tfsdk:"id"`
LastUpdated types.String `tfsdk:"last_updated"`

View File

@@ -234,15 +234,8 @@ func (r *resourceImage) Delete(ctx context.Context, req resource.DeleteRequest,
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
var permanently bool
if state.Permanently.IsNull() {
permanently = true
} else {
permanently = state.Permanently.ValueBool()
}
// Delete image
_, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64()), Permanently: permanently})
_, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64())})
if err != nil {
resp.Diagnostics.AddError("Error deleting image with error: ", err.Error())
return

View File

@@ -229,15 +229,8 @@ func (r *resourceImageVirtual) Delete(ctx context.Context, req resource.DeleteRe
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
defer cancel()
var permanently bool
if state.Permanently.IsNull() {
permanently = true
} else {
permanently = state.Permanently.ValueBool()
}
// Delete image
_, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64()), Permanently: permanently})
_, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64())})
if err != nil {
resp.Diagnostics.AddError("Error deleting image virtual with error: ", err.Error())
return

View File

@@ -54,6 +54,9 @@ func MakeSchemaDataSourceImageList() map[string]schema.Attribute {
"size": schema.Int64Attribute{
Optional: true,
},
"enabled": schema.BoolAttribute{
Optional: true,
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,

View File

@@ -84,12 +84,9 @@ func MakeSchemaResourceImage() map[string]schema.Attribute {
Computed: true,
Optional: true,
Validators: []validator.String{
stringvalidator.OneOf("X86_64", "PPC64_LE"),
stringvalidator.OneOf("X86_64"),
},
Description: "Binary architecture of this image, one of X86_64 of PPC64_LE",
},
"permanently": schema.BoolAttribute{
Optional: true,
Description: "Binary architecture of this image, one of X86_64",
},
// computed attributes
"id": schema.StringAttribute{

View File

@@ -18,10 +18,6 @@ func MakeSchemaResourceImageVirtual() map[string]schema.Attribute {
Required: true,
Description: "ID of real image to link this virtual image to upon creation",
},
// optional attributes
"permanently": schema.BoolAttribute{
Optional: true,
},
// computed attributes
"id": schema.StringAttribute{
Computed: true,

View File

@@ -56,6 +56,9 @@ func ImageListCheckPresence(ctx context.Context, state *models.ListImagesModel,
if !state.Size.IsNull() {
req.Size = uint64(state.Size.ValueInt64())
}
if !state.Enabled.IsNull() {
req.Enabled = state.Enabled.ValueBool()
}
if !state.Page.IsNull() {
req.Page = uint64(state.Page.ValueInt64())
}

View File

@@ -60,31 +60,34 @@ func DataSourceK8s(ctx context.Context, state *models.RecordK8SDataSourceModel,
id := uuid.New()
*state = models.RecordK8SDataSourceModel{
K8SID: state.K8SID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
ACL: flattenACLDataSource(ctx, &cluster.ACL),
AccountID: types.Int64Value(int64(cluster.AccountID)),
AccountName: types.StringValue(cluster.AccountName),
BServiceID: types.Int64Value(int64(cluster.BServiceID)),
K8CI: types.Int64Value(int64(cluster.CIID)),
CreatedBy: types.StringValue(cluster.CreatedBy),
CreatedTime: types.Int64Value(int64(cluster.CreatedTime)),
DeletedBy: types.StringValue(cluster.DeletedBy),
DeletedTime: types.Int64Value(int64(cluster.DeletedTime)),
K8CIName: types.StringValue(cluster.K8CIName),
Masters: flattenMasterGroup(ctx, &cluster.K8SGroups.Masters, masterComputeList),
Workers: flattenK8sGroup(ctx, &cluster.K8SGroups.Workers, workersComputeList),
LBID: types.Int64Value(int64(cluster.LBID)),
Name: types.StringValue(cluster.Name),
NetworkPlugin: types.StringValue(cluster.NetworkPlugin),
RGID: types.Int64Value(int64(cluster.RGID)),
RGName: types.StringValue(cluster.RGName),
Status: types.StringValue(cluster.Status),
TechStatus: types.StringValue(cluster.TechStatus),
UpdatedBy: types.StringValue(cluster.UpdatedBy),
UpdatedTime: types.Int64Value(int64(cluster.UpdatedTime)),
VinsId: types.Int64Value(int64(k8sList.Data[0].VINSID)),
K8SID: state.K8SID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),
ACL: flattenACLDataSource(ctx, &cluster.ACL),
AccountID: types.Int64Value(int64(cluster.AccountID)),
AccountName: types.StringValue(cluster.AccountName),
K8SAddressVIP: flattenAddressVIP(ctx, cluster.AddressVIP),
BServiceID: types.Int64Value(int64(cluster.BServiceID)),
K8CI: types.Int64Value(int64(cluster.CIID)),
CreatedBy: types.StringValue(cluster.CreatedBy),
CreatedTime: types.Int64Value(int64(cluster.CreatedTime)),
DeletedBy: types.StringValue(cluster.DeletedBy),
DeletedTime: types.Int64Value(int64(cluster.DeletedTime)),
ExtnetOnly: types.BoolValue(cluster.ExtnetOnly),
HighlyAvailableLB: types.BoolValue(cluster.HighlyAvailableLB),
K8CIName: types.StringValue(cluster.K8CIName),
Masters: flattenMasterGroup(ctx, &cluster.K8SGroups.Masters, masterComputeList),
Workers: flattenK8sGroup(ctx, &cluster.K8SGroups.Workers, workersComputeList),
LBID: types.Int64Value(int64(cluster.LBID)),
Name: types.StringValue(cluster.Name),
NetworkPlugin: types.StringValue(cluster.NetworkPlugin),
RGID: types.Int64Value(int64(cluster.RGID)),
RGName: types.StringValue(cluster.RGName),
Status: types.StringValue(cluster.Status),
TechStatus: types.StringValue(cluster.TechStatus),
UpdatedBy: types.StringValue(cluster.UpdatedBy),
UpdatedTime: types.Int64Value(int64(cluster.UpdatedTime)),
VinsId: types.Int64Value(int64(k8sList.Data[0].VINSID)),
}
if cluster.LBID != 0 {
@@ -106,6 +109,18 @@ func DataSourceK8s(ctx context.Context, state *models.RecordK8SDataSourceModel,
return nil
}
// flattenAddressVIP converts the SDK K8SAddressVIP record into the data
// source model representation of the "address_vip" attribute.
func flattenAddressVIP(ctx context.Context, addressedVip k8s.K8SAddressVIP) *models.K8SAddressVIP {
	tflog.Info(ctx, "Start flattenAddressVIP")

	vip := &models.K8SAddressVIP{
		BackendIP:  types.StringValue(addressedVip.BackendIP),
		FrontendIP: types.StringValue(addressedVip.FrontendIP),
	}

	tflog.Info(ctx, "End flattenAddressVIP")
	return vip
}
func flattenMasterGroup(ctx context.Context, mastersGroup *k8s.MasterGroup, masters []*compute.RecordCompute) *models.MasterGroupDataSourceModel {
tflog.Info(ctx, "Start flattenMasterGroup")

View File

@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
@@ -67,6 +68,7 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco
Description: plan.Description,
ExtNetOnly: plan.ExtNetOnly,
OidcCertificate: plan.OidcCertificate,
Chipset: plan.Chipset,
Start: plan.Start,
Enabled: plan.Enabled,
Permanently: plan.Permanently,
@@ -90,6 +92,7 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco
CreatedTime: types.Int64Value(int64(cluster.CreatedTime)),
DeletedBy: types.StringValue(cluster.DeletedBy),
DeletedTime: types.Int64Value(int64(cluster.DeletedTime)),
K8SAddressVIP: flattenK8SAddressVIP(ctx, cluster.AddressVIP),
K8SID: types.Int64Value(int64(cluster.ID)),
K8CIName: types.StringValue(cluster.K8CIName),
LBID: types.Int64Value(int64(cluster.LBID)),
@@ -126,6 +129,23 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco
return nil
}
// flattenK8SAddressVIP converts the SDK K8SAddressVIP record into a Terraform
// object value using the models.AddressVIP attribute-type map. Conversion
// diagnostics are logged but not propagated; the (possibly unknown) object
// value is returned either way.
func flattenK8SAddressVIP(ctx context.Context, addressedVip k8s.K8SAddressVIP) basetypes.ObjectValue {
	// Fixed: Start/End log messages previously named the sibling flattenAddressVIP.
	tflog.Info(ctx, "Start flattenK8SAddressVIP")

	temp := models.AddressVIPModel{
		BackendIP:  types.StringValue(addressedVip.BackendIP),
		FrontendIP: types.StringValue(addressedVip.FrontendIP),
	}

	// ObjectValueFrom returns diag.Diagnostics, not an error — name it accordingly.
	res, diags := types.ObjectValueFrom(ctx, models.AddressVIP, temp)
	if diags != nil {
		tflog.Error(ctx, fmt.Sprint("Error flattenK8SAddressVIP struct to obj", diags))
	}

	tflog.Info(ctx, "End flattenK8SAddressVIP")
	return res
}
func flattenDetailedInfo(ctx context.Context, di *k8s.ListDetailedInfo, computes []*compute.RecordCompute) types.List {
tflog.Info(ctx, "Start flattenDetailedInfo")

View File

@@ -48,6 +48,8 @@ func K8SWGResource(ctx context.Context, plan *models.ResourceK8SWGModel, c *deco
WorkerSEPPool: plan.WorkerSEPPool,
CloudInit: plan.CloudInit,
Timeouts: plan.Timeouts,
Chipset: plan.Chipset,
WorkerChipset: plan.WorkerChipset,
Id: types.StringValue(strconv.Itoa(int(wg.ID))),
WorkerGroupId: types.Int64Value(int64(wg.ID)),
LastUpdated: plan.LastUpdated,

View File

@@ -10,32 +10,40 @@ type RecordK8SDataSourceModel struct {
K8SID types.Int64 `tfsdk:"k8s_id"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields
Id types.String `tfsdk:"id"`
ACL *RecordACLDataSourceModel `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
BServiceID types.Int64 `tfsdk:"bservice_id"`
K8CI types.Int64 `tfsdk:"k8sci_id"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
K8CIName types.String `tfsdk:"k8sci_name"`
Masters *MasterGroupDataSourceModel `tfsdk:"masters"`
Workers []ItemK8SGroupDataSourceModel `tfsdk:"workers"`
LBID types.Int64 `tfsdk:"lb_id"`
LBIP types.String `tfsdk:"lb_ip"`
Name types.String `tfsdk:"name"`
NetworkPlugin types.String `tfsdk:"network_plugin"`
RGID types.Int64 `tfsdk:"rg_id"`
RGName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
Kubeconfig types.String `tfsdk:"kubeconfig"`
VinsId types.Int64 `tfsdk:"vins_id"`
Id types.String `tfsdk:"id"`
ACL *RecordACLDataSourceModel `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
AccountName types.String `tfsdk:"account_name"`
BServiceID types.Int64 `tfsdk:"bservice_id"`
K8CI types.Int64 `tfsdk:"k8sci_id"`
CreatedBy types.String `tfsdk:"created_by"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedBy types.String `tfsdk:"deleted_by"`
ExtnetOnly types.Bool `tfsdk:"extnet_only"`
HighlyAvailableLB types.Bool `tfsdk:"ha_mode"`
K8SAddressVIP *K8SAddressVIP `tfsdk:"address_vip"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
K8CIName types.String `tfsdk:"k8sci_name"`
Masters *MasterGroupDataSourceModel `tfsdk:"masters"`
Workers []ItemK8SGroupDataSourceModel `tfsdk:"workers"`
LBID types.Int64 `tfsdk:"lb_id"`
LBIP types.String `tfsdk:"lb_ip"`
Name types.String `tfsdk:"name"`
NetworkPlugin types.String `tfsdk:"network_plugin"`
RGID types.Int64 `tfsdk:"rg_id"`
RGName types.String `tfsdk:"rg_name"`
Status types.String `tfsdk:"status"`
TechStatus types.String `tfsdk:"tech_status"`
UpdatedBy types.String `tfsdk:"updated_by"`
UpdatedTime types.Int64 `tfsdk:"updated_time"`
Kubeconfig types.String `tfsdk:"kubeconfig"`
VinsId types.Int64 `tfsdk:"vins_id"`
}
// K8SAddressVIP holds the cluster VIP addresses exposed by the k8s data
// source as the nested "address_vip" attribute.
type K8SAddressVIP struct {
	BackendIP  types.String `tfsdk:"backend_ip"`  // VIP backend IP address
	FrontendIP types.String `tfsdk:"frontend_ip"` // VIP frontend IP address
}
type RecordACLDataSourceModel struct {

View File

@@ -16,6 +16,7 @@ type ResourceK8SCPModel struct {
SEPPool types.String `tfsdk:"sep_pool"`
Num types.Int64 `tfsdk:"num"`
CPU types.Int64 `tfsdk:"cpu"`
Chipset types.String `tfsdk:"chipset"`
RAM types.Int64 `tfsdk:"ram"`
Disk types.Int64 `tfsdk:"disk"`
ExtNetID types.Int64 `tfsdk:"extnet_id"`
@@ -51,6 +52,7 @@ type ResourceK8SCPModel struct {
DeletedTime types.Int64 `tfsdk:"deleted_time"`
K8SID types.Int64 `tfsdk:"k8s_id"`
K8CIName types.String `tfsdk:"k8s_ci_name"`
K8SAddressVIP types.Object `tfsdk:"address_vip"`
LBID types.Int64 `tfsdk:"lb_id"`
LBIP types.String `tfsdk:"lb_ip"`
MasterGroupId types.Int64 `tfsdk:"master_group_id"`
@@ -63,6 +65,11 @@ type ResourceK8SCPModel struct {
Kubeconfig types.String `tfsdk:"kubeconfig"`
}
// AddressVIPModel is the resource-side model for the "address_vip" object
// attribute (cluster VIP backend/frontend addresses).
type AddressVIPModel struct {
	BackendIP  types.String `tfsdk:"backend_ip"`  // VIP backend IP address
	FrontendIP types.String `tfsdk:"frontend_ip"` // VIP frontend IP address
}
type RecordACLModel struct {
AccountACL types.List `tfsdk:"account_acl"`
K8SACL types.List `tfsdk:"k8s_acl"`
@@ -93,6 +100,11 @@ type ItemInterfacesModel struct {
IpAddress types.String `tfsdk:"ip_address"`
}
// AddressVIP maps the "address_vip" object attribute names to their Terraform
// types; used with types.ObjectValueFrom when flattening AddressVIPModel.
var AddressVIP map[string]attr.Type = map[string]attr.Type{
	"backend_ip":  types.StringType,
	"frontend_ip": types.StringType,
}
var ItemInterfaces map[string]attr.Type = map[string]attr.Type{
"def_gw": types.StringType,
"ip_address": types.StringType,

View File

@@ -11,8 +11,10 @@ type ResourceK8SWGModel struct {
K8SID types.Int64 `tfsdk:"k8s_id"`
Name types.String `tfsdk:"name"`
Num types.Int64 `tfsdk:"num"`
WorkerChipset types.String `tfsdk:"worker_chipset"`
CPU types.Int64 `tfsdk:"cpu"`
RAM types.Int64 `tfsdk:"ram"`
Chipset types.String `tfsdk:"chipset"`
Disk types.Int64 `tfsdk:"disk"`
Annotations types.List `tfsdk:"annotations"`
Labels types.List `tfsdk:"labels"`

View File

@@ -122,6 +122,23 @@ func MakeSchemaDataSourceK8S() map[string]schema.Attribute {
"extnet_id": schema.Int64Attribute{
Computed: true,
},
"extnet_only": schema.BoolAttribute{
Computed: true,
},
"ha_mode": schema.BoolAttribute{
Computed: true,
},
"address_vip": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"backend_ip": schema.StringAttribute{
Computed: true,
},
"frontend_ip": schema.StringAttribute{
Computed: true,
},
},
},
"k8sci_name": schema.StringAttribute{
Computed: true,
},

View File

@@ -126,7 +126,14 @@ func MakeSchemaResourceK8SCP() map[string]schema.Attribute {
},
"oidc_cert": schema.StringAttribute{
Optional: true,
Description: "insert ssl certificate in x509 pem format",
Description: "Insert ssl certificate in x509 pem format",
},
"chipset": schema.StringAttribute{
Optional: true,
Description: "Type of the emulated system",
Validators: []validator.String{
stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"),
},
},
"lb_sysctl_params": schema.ListNestedAttribute{
Optional: true,
@@ -302,6 +309,17 @@ func MakeSchemaResourceK8SCP() map[string]schema.Attribute {
"account_name": schema.StringAttribute{
Computed: true,
},
"address_vip": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"backend_ip": schema.StringAttribute{
Computed: true,
},
"frontend_ip": schema.StringAttribute{
Computed: true,
},
},
},
"bservice_id": schema.Int64Attribute{
Computed: true,
},

View File

@@ -1,6 +1,7 @@
package schemas
import (
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
@@ -28,11 +29,25 @@ func MakeSchemaResourceK8SWG() map[string]schema.Attribute {
Computed: true,
Description: "Number of worker nodes to create.",
},
"worker_chipset": schema.StringAttribute{
Optional: true,
Description: "Type of the emulated system of worker nodes",
Validators: []validator.String{
stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"),
},
},
"cpu": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Worker node CPU count.",
},
"chipset": schema.StringAttribute{
Optional: true,
Description: "Type of the emulated system of work group",
Validators: []validator.String{
stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"),
},
},
"ram": schema.Int64Attribute{
Optional: true,
Computed: true,

View File

@@ -100,6 +100,10 @@ func CreateRequestResourceK8CP(ctx context.Context, plan *models.ResourceK8SCPMo
req.OidcCertificate = plan.OidcCertificate.ValueString()
}
if !plan.Chipset.IsNull() {
req.Chipset = plan.Chipset.ValueString()
}
if !plan.Description.IsNull() {
req.Description = plan.Description.ValueString()
}
@@ -427,7 +431,7 @@ func K8SCPDeleteMaster(ctx context.Context, plan *models.ResourceK8SCPModel, sta
diags := diag.Diagnostics{}
deleteMasterComp := make([]string, 0)
deleteMasterComp := make([]uint64, 0)
for i, val := range state.DetailedInfo.Elements() {
if i == 2 {
@@ -437,15 +441,16 @@ func K8SCPDeleteMaster(ctx context.Context, plan *models.ResourceK8SCPModel, sta
if err != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenDetailedInfo struct to obj", err), map[string]any{"k8s_id": plan.Id.ValueString()})
}
id := obj.Attributes()["compute_id"]
id := obj.Attributes()["compute_id"].(types.Int64).ValueInt64()
deleteMasterComp = append(deleteMasterComp, id.String())
deleteMasterComp = append(deleteMasterComp, uint64(id))
}
req := k8s.DeleteMasterFromGroupRequest{
K8SID: uint64(state.K8SID.ValueInt64()),
MasterGroupID: uint64(state.MasterGroupId.ValueInt64()),
MasterIDs: deleteMasterComp,
//TODO fix it
//MasterIDs: deleteMasterComp,
}
_, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)

View File

@@ -13,6 +13,7 @@ import (
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/models"
)
@@ -76,6 +77,9 @@ func CreateRequestResourceK8WG(ctx context.Context, plan *models.ResourceK8SWGMo
if !plan.CloudInit.IsNull() {
req.UserData = plan.CloudInit.ValueString()
}
if !plan.Chipset.IsNull() {
req.Chipset = plan.Chipset.ValueString()
}
tflog.Info(ctx, "End CreateRequestResourceK8WG", map[string]any{"name": plan.Name.ValueString()})
return req
@@ -87,14 +91,44 @@ func ResourceK8SWGCreate(ctx context.Context, plan *models.ResourceK8SWGModel, c
diags := diag.Diagnostics{}
// Make request and get response
wgId, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, CreateRequestResourceK8WG(ctx, plan))
resp, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, CreateRequestResourceK8WG(ctx, plan))
if err != nil {
tflog.Error(ctx, "Error response for create k8s_wg")
diags.AddError("Unable to Create K8SWG", err.Error())
return diags
}
plan.Id = types.StringValue(strconv.Itoa(int(wgId)))
taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
for {
task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
if err != nil {
diags.AddError("The audit cannot be found", err.Error())
return diags
}
tflog.Info(ctx, fmt.Sprintf("ResourceK8SWGCreate instance creating - %s", task.Stage))
if task.Completed {
if task.Error != "" {
diags.AddError("Cannot create k8s wg instance:", task.Error)
return diags
}
result, err := task.Result.ID()
if err != nil {
diags.AddError("Cannot get wg ID:", err.Error())
return diags
}
plan.Id = types.StringValue(strconv.Itoa(result))
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
break
}
time.Sleep(time.Second * 20)
}
plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
wg, k8sId, err := K8SWGResourceCheckPresence(ctx, plan, c)
@@ -185,6 +219,7 @@ func K8SWGUpdateNumWorkers(ctx context.Context, plan, state *models.ResourceK8SW
K8SID: k8sId,
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
Chipset: plan.WorkerChipset.ValueString(),
}
tflog.Info(ctx, "Add workers in wg with id", map[string]any{"wg_id": state.WorkerGroupId.ValueInt64(), "k8s_id": state.K8SID.ValueInt64()})

View File

@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
@@ -26,6 +27,8 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
return diags
}
pciDevicesList, err := utilities.ComputePCIDevicesListCheckPresence(ctx, state, c)
id := uuid.New()
customFields, _ := json.Marshal(computeRecord.CustomFields)
@@ -44,6 +47,7 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
AffinityWeight: types.Int64Value(int64(computeRecord.AffinityWeight)),
AntiAffinityRules: flattenAffinityRule(ctx, &computeRecord.AntiAffinityRules),
Architecture: types.StringValue(computeRecord.Architecture),
Chipset: types.StringValue(computeRecord.Chipset),
BootDiskSize: types.Int64Value(int64(computeRecord.BootDiskSize)),
CdImageId: types.Int64Value(int64(computeRecord.CdImageId)),
CloneReference: types.Int64Value(int64(computeRecord.CloneReference)),
@@ -81,6 +85,7 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
NumaNodeId: types.Int64Value(int64(computeRecord.NumaNodeId)),
OSUsers: flattenOSUsers(ctx, &computeRecord.OSUsers),
Pinned: types.BoolValue(computeRecord.Pinned),
PCIDevices: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, flattenPCI(ctx, pciDevicesList)),
RAM: types.Int64Value(int64(computeRecord.RAM)),
ReferenceID: types.StringValue(computeRecord.ReferenceID),
Registered: types.BoolValue(computeRecord.Registered),
@@ -189,6 +194,7 @@ func flattenDisks(ctx context.Context, disks *compute.ListComputeDisks) []models
ACL: types.StringValue(string(acl)),
AccountID: types.Int64Value(int64(item.AccountID)),
BootPartition: types.Int64Value(int64(item.BootPartition)),
BusNumber: types.Int64Value(int64(item.BusNumber)),
CreatedTime: types.Int64Value(int64(item.CreatedTime)),
DeletedTime: types.Int64Value(int64(item.CreatedTime)),
Description: types.StringValue(item.Description),
@@ -290,6 +296,7 @@ func flattenInterfaces(ctx context.Context, interfaces *compute.ListInterfaces)
res := make([]models.ItemInterfaceModel, 0, len(*interfaces))
for _, item := range *interfaces {
temp := models.ItemInterfaceModel{
BusNumber: types.Int64Value(int64(item.BusNumber)),
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
DefGW: types.StringValue(item.DefGW),
@@ -297,14 +304,24 @@ func flattenInterfaces(ctx context.Context, interfaces *compute.ListInterfaces)
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
LibvirtSettings: &models.LibvirtModel{
GUID: types.StringValue(item.LibvirtSettings.GUID),
TXMode: types.StringValue(item.LibvirtSettings.TXMode),
IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD),
EventIDx: types.StringValue(item.LibvirtSettings.EventIDx),
Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)),
RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)),
TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)),
},
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
MTU: types.Int64Value(int64(item.MTU)),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
QOS: &models.QOSModel{
ERate: types.Int64Value(int64(item.QOS.ERate)),
GUID: types.StringValue(item.QOS.GUID),
@@ -365,3 +382,15 @@ func flattenOSUsers(ctx context.Context, osUsers *compute.ListOSUser) []models.I
tflog.Info(ctx, "End flattenOSUsers")
return res
}
func flattenPCI(ctx context.Context, pciList *compute.ListPCIDevices) []uint64 {
tflog.Info(ctx, "Start flattenPCI")
res := make([]uint64, 0, len(pciList.Data))
for _, v := range pciList.Data {
res = append(res, v.ID)
}
tflog.Info(ctx, "End flattenPCI")
return res
}

View File

@@ -75,6 +75,7 @@ func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []mod
BootDiskSize: types.Int64Value(int64(item.BootDiskSize)),
CdImageId: types.Int64Value(int64(item.CdImageId)),
CloneReference: types.Int64Value(int64(item.CloneReference)),
Chipset: types.StringValue(item.Chipset),
ComputeCIID: types.Int64Value(int64(item.ComputeCIID)),
CPU: types.Int64Value(int64(item.CPU)),
CPUPin: types.BoolValue(item.CPUPin),
@@ -195,8 +196,9 @@ func flattenDisksInList(ctx context.Context, disks *compute.ListInfoDisks) []mod
res := make([]models.DiskInListModel, 0, len(*disks))
for _, item := range *disks {
temp := models.DiskInListModel{
DiskId: types.Int64Value(int64(item.ID)),
PCISlot: types.Int64Value(item.PCISlot),
BusNumber: types.Int64Value(int64(item.BusNumber)),
DiskId: types.Int64Value(int64(item.ID)),
PCISlot: types.Int64Value(item.PCISlot),
}
res = append(res, temp)
}
@@ -213,6 +215,7 @@ func flattenInterfaceInList(ctx context.Context, interfaces *compute.ListInterfa
res := make([]models.ItemVNFInterfaceInListModel, 0, len(*interfaces))
for _, item := range *interfaces {
temp := models.ItemVNFInterfaceInListModel{
BusNumber: types.Int64Value(int64(item.BusNumber)),
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
DefGW: types.StringValue(item.DefGW),
@@ -220,14 +223,23 @@ func flattenInterfaceInList(ctx context.Context, interfaces *compute.ListInterfa
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
LibvirtSettings: &models.LibvirtModel{
GUID: types.StringValue(item.LibvirtSettings.GUID),
TXMode: types.StringValue(item.LibvirtSettings.TXMode),
IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD),
EventIDx: types.StringValue(item.LibvirtSettings.EventIDx),
Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)),
RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)),
TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)),
},
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
QOS: &models.QOSInListModel{
ERate: types.Int64Value(int64(item.QOS.ERate)),
GUID: types.StringValue(item.QOS.GUID),

View File

@@ -72,6 +72,7 @@ func flattenItemsListDeleted(ctx context.Context, computes *compute.ListComputes
Architecture: types.StringValue(item.Architecture),
BootDiskSize: types.Int64Value(int64(item.BootDiskSize)),
CdImageId: types.Int64Value(int64(item.CdImageId)),
Chipset: types.StringValue(item.Chipset),
CloneReference: types.Int64Value(int64(item.CloneReference)),
ComputeCIID: types.Int64Value(int64(item.ComputeCIID)),
CPU: types.Int64Value(int64(item.CPU)),
@@ -193,8 +194,9 @@ func flattenDisksInListDeleted(ctx context.Context, disks *compute.ListInfoDisks
res := make([]models.DiskInListDeletedModel, 0, len(*disks))
for _, item := range *disks {
temp := models.DiskInListDeletedModel{
DiskId: types.Int64Value(int64(item.ID)),
PCISlot: types.Int64Value(item.PCISlot),
BusNumber: types.Int64Value(int64(item.BusNumber)),
DiskId: types.Int64Value(int64(item.ID)),
PCISlot: types.Int64Value(item.PCISlot),
}
res = append(res, temp)
}
@@ -211,6 +213,7 @@ func flattenInterfaceInListDeleted(ctx context.Context, interfaces *compute.List
res := make([]models.ItemVNFInterfaceInListDeletedModel, 0, len(*interfaces))
for _, item := range *interfaces {
temp := models.ItemVNFInterfaceInListDeletedModel{
BusNumber: types.Int64Value(int64(item.BusNumber)),
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
DefGW: types.StringValue(item.DefGW),
@@ -218,14 +221,23 @@ func flattenInterfaceInListDeleted(ctx context.Context, interfaces *compute.List
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
LibvirtSettings: &models.LibvirtModel{
GUID: types.StringValue(item.LibvirtSettings.GUID),
TXMode: types.StringValue(item.LibvirtSettings.TXMode),
IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD),
EventIDx: types.StringValue(item.LibvirtSettings.EventIDx),
Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)),
RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)),
TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)),
},
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
QOS: &models.QOSInListModel{
ERate: types.Int64Value(int64(item.QOS.ERate)),
GUID: types.StringValue(item.QOS.GUID),

View File

@@ -49,15 +49,17 @@ func ComputeResource(ctx context.Context, plan *models.ResourceComputeModel, c *
AffinityRules: plan.AffinityRules,
AntiAffinityRules: plan.AntiAffinityRules,
CustomFields: types.StringValue(string(customFields)),
Chipset: types.StringValue(recordItemCompute.Chipset),
Stateless: plan.Stateless,
SepId: types.Int64Value(int64(bootdisk.SepID)),
Pool: types.StringValue(bootdisk.Pool),
ExtraDisks: plan.ExtraDisks,
Network: flattenNetwork(ctx, &recordItemCompute.Interfaces),
Network: flattenNetwork(ctx, plan.Network, &recordItemCompute.Interfaces),
Tags: plan.Tags,
PortForwarding: plan.PortForwarding,
UserAccess: plan.UserAccess,
Snapshot: plan.Snapshot,
PCIDevices: plan.PCIDevices,
Rollback: plan.Rollback,
CD: plan.CD,
PinToStack: plan.PinToStack,
@@ -205,6 +207,7 @@ func flattenDisk(ctx context.Context, disk *compute.ItemComputeDisk) types.Objec
ACL: types.StringValue(string(acl)),
AccountID: types.Int64Value(int64(disk.AccountID)),
BootPartition: types.Int64Value(int64(disk.BootPartition)),
BusNumber: types.Int64Value(int64(disk.BusNumber)),
CreatedTime: types.Int64Value(int64(disk.CreatedTime)),
DeletedTime: types.Int64Value(int64(disk.DeletedTime)),
Description: types.StringValue(disk.Description),
@@ -385,25 +388,28 @@ func flattenResourceInterfaces(ctx context.Context, interfaces *compute.ListInte
for _, item := range *interfaces {
temp := models.ItemResourceInterfacesModel{
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
GetGW: types.StringValue(item.DefGW),
Enabled: types.BoolValue(item.Enabled),
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
QOS: flattenQOS(ctx, &item.QOS),
Target: types.StringValue(item.Target),
Type: types.StringValue(item.Type),
VNFs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VNFs),
BusNumber: types.Int64Value(int64(item.BusNumber)),
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
GetGW: types.StringValue(item.DefGW),
Enabled: types.BoolValue(item.Enabled),
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
MTU: types.Int64Value(int64(item.MTU)),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(item.PCISlot),
QOS: flattenQOS(ctx, &item.QOS),
LibvirtSettings: flattenLibvirtSetttings(ctx, &item.LibvirtSettings),
Target: types.StringValue(item.Target),
Type: types.StringValue(item.Type),
VNFs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VNFs),
}
obj, err := types.ObjectValueFrom(ctx, models.ItemInterfaces, temp)
if err != nil {
@@ -439,6 +445,27 @@ func flattenQOS(ctx context.Context, QOS *compute.QOS) types.Object {
return res
}
func flattenLibvirtSetttings(ctx context.Context, settings *compute.LibvirtSettings) types.Object {
tflog.Info(ctx, "Start flattenLibvirtSetttings")
temp := models.LibvirtModel{
GUID: types.StringValue(settings.GUID),
TXMode: types.StringValue(settings.TXMode),
IOEventFD: types.StringValue(settings.IOEventFD),
EventIDx: types.StringValue(settings.EventIDx),
Queues: types.Int64Value(int64(settings.Queues)),
RXQueueSize: types.Int64Value(int64(settings.RXQueueSize)),
TXQueueSize: types.Int64Value(int64(settings.TXQueueSize)),
}
res, err := types.ObjectValueFrom(ctx, models.ItemLibvirtSettings, temp)
if err != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenLibvirtSetttings struct to obj", err))
}
tflog.Info(ctx, "End flattenLibvirtSetttings")
return res
}
func flattenSnapSets(ctx context.Context, snapSets *compute.ListSnapSets) types.List {
tflog.Info(ctx, "Start flattenSnapSets")
tempSlice := make([]types.Object, 0, len(*snapSets))
@@ -465,7 +492,7 @@ func flattenSnapSets(ctx context.Context, snapSets *compute.ListSnapSets) types.
return res
}
func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) types.Set {
func flattenNetwork(ctx context.Context, networks types.Set, interfaces *compute.ListInterfaces) types.Set {
tflog.Info(ctx, "Start flattenNetwork")
tempSlice := make([]types.Object, 0, len(*interfaces))
@@ -476,6 +503,8 @@ func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) typ
NetId: types.Int64Value(int64(item.NetID)),
IpAddress: types.StringValue(item.IPAddress),
Mac: types.StringValue(item.MAC),
Weight: flattenNetworkWeight(ctx, networks, item),
MTU: types.Int64Value(int64(item.MTU)),
}
obj, err := types.ObjectValueFrom(ctx, models.ItemNetwork, temp)
if err != nil {
@@ -492,3 +521,16 @@ func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) typ
tflog.Info(ctx, "End flattenNetwork")
return res
}
func flattenNetworkWeight(ctx context.Context, networks types.Set, item compute.ItemVNFInterface) types.Int64 {
tflog.Info(ctx, "Start flattenNetworkWeight")
networkList := networks.Elements()
for _, network := range networkList {
networkMap := network.(types.Object).Attributes()
if uint64(networkMap["net_id"].(types.Int64).ValueInt64()) == item.NetID && networkMap["net_type"].(types.String).ValueString() == item.NetType {
return types.Int64Value(networkMap["weight"].(types.Int64).ValueInt64())
}
}
tflog.Info(ctx, "End flattenNetworkWeight")
return types.Int64Value(0)
}

View File

@@ -59,14 +59,17 @@ func resourceComputeInputChecks(ctx context.Context, plan *models.ResourceComput
diags.AddError(fmt.Sprintf("Cannot create compute because extnet ID %d is not allowed or does not exist", extNetId), err.Error())
}
case "VFNIC":
if strings.EqualFold(plan.Driver.ValueString(), "KVM_PPC") {
diags.AddError("can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'", "")
}
vfpoolId := uint64(elemMap["net_id"].(types.Int64).ValueInt64())
err = ic.ExistVFPool(ctx, vfpoolId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot create compute because vfpool ID %d is not allowed or does not exist", vfpoolId), err.Error())
}
case "DPDK":
dpdkId := uint64(elemMap["net_id"].(types.Int64).ValueInt64())
err = ic.ExistDPDK(ctx, dpdkId, c)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot create compute because DPDK net ID %d is not allowed or does not exist", dpdkId), err.Error())
}
}
}
}

View File

@@ -19,6 +19,7 @@ type RecordComputeModel struct {
AffinityWeight types.Int64 `tfsdk:"affinity_weight"`
AntiAffinityRules []ItemRuleModel `tfsdk:"anti_affinity_rules"`
Architecture types.String `tfsdk:"arch"`
Chipset types.String `tfsdk:"chipset"`
BootOrder types.List `tfsdk:"boot_order"`
BootDiskSize types.Int64 `tfsdk:"bootdisk_size"`
CdImageId types.Int64 `tfsdk:"cd_image_id"`
@@ -57,6 +58,7 @@ type RecordComputeModel struct {
NumaAffinity types.String `tfsdk:"numa_affinity"`
NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
OSUsers []ItemOSUserModel `tfsdk:"os_users"`
PCIDevices types.List `tfsdk:"pci_devices"`
Pinned types.Bool `tfsdk:"pinned"`
RAM types.Int64 `tfsdk:"ram"`
ReferenceID types.String `tfsdk:"reference_id"`
@@ -109,6 +111,7 @@ type ItemDiskModel struct {
ACL types.String `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
BusNumber types.Int64 `tfsdk:"bus_number"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
@@ -149,25 +152,28 @@ type ItemDiskModel struct {
}
type ItemInterfaceModel struct {
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
BusNumber types.Int64 `tfsdk:"bus_number"`
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
MTU types.Int64 `tfsdk:"mtu"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
type QOSModel struct {
@@ -177,6 +183,16 @@ type QOSModel struct {
InRate types.Int64 `tfsdk:"in_rate"`
}
type LibvirtModel struct {
GUID types.String `tfsdk:"guid"`
TXMode types.String `tfsdk:"txmode"`
IOEventFD types.String `tfsdk:"ioeventfd"`
EventIDx types.String `tfsdk:"event_idx"`
Queues types.Int64 `tfsdk:"queues"`
RXQueueSize types.Int64 `tfsdk:"rx_queue_size"`
TXQueueSize types.Int64 `tfsdk:"tx_queue_size"`
}
type ItemSnapSetModel struct {
Disks types.List `tfsdk:"disks"`
GUID types.String `tfsdk:"guid"`

View File

@@ -43,6 +43,7 @@ type ItemComputeModel struct {
CdImageId types.Int64 `tfsdk:"cd_image_id"`
CloneReference types.Int64 `tfsdk:"clone_reference"`
Clones types.List `tfsdk:"clones"`
Chipset types.String `tfsdk:"chipset"`
ComputeCIID types.Int64 `tfsdk:"computeci_id"`
CPU types.Int64 `tfsdk:"cpus"`
CPUPin types.Bool `tfsdk:"cpu_pin"`
@@ -112,30 +113,33 @@ type ItemRuleInListModel struct {
}
type DiskInListModel struct {
DiskId types.Int64 `tfsdk:"disk_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
BusNumber types.Int64 `tfsdk:"bus_number"`
DiskId types.Int64 `tfsdk:"disk_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
}
type ItemVNFInterfaceInListModel struct {
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSInListModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
BusNumber types.Int64 `tfsdk:"bus_number"`
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSInListModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
type QOSInListModel struct {

View File

@@ -39,6 +39,7 @@ type ItemListDeletedComputeModel struct {
BootOrder types.List `tfsdk:"boot_order"`
BootDiskSize types.Int64 `tfsdk:"bootdisk_size"`
CdImageId types.Int64 `tfsdk:"cd_image_id"`
Chipset types.String `tfsdk:"chipset"`
CloneReference types.Int64 `tfsdk:"clone_reference"`
Clones types.List `tfsdk:"clones"`
ComputeCIID types.Int64 `tfsdk:"computeci_id"`
@@ -110,30 +111,33 @@ type ItemRuleInListDeletedModel struct {
}
type DiskInListDeletedModel struct {
DiskId types.Int64 `tfsdk:"disk_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
BusNumber types.Int64 `tfsdk:"bus_number"`
DiskId types.Int64 `tfsdk:"disk_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
}
type ItemVNFInterfaceInListDeletedModel struct {
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSInListModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
BusNumber types.Int64 `tfsdk:"bus_number"`
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS *QOSInListModel `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
type QOSInListDeletedModel struct {

View File

@@ -23,6 +23,7 @@ type ResourceComputeModel struct {
AffinityRules types.Set `tfsdk:"affinity_rules"`
AntiAffinityRules types.Set `tfsdk:"anti_affinity_rules"`
CustomFields types.String `tfsdk:"custom_fields"`
Chipset types.String `tfsdk:"chipset"`
Stateless types.Bool `tfsdk:"stateless"`
SepId types.Int64 `tfsdk:"sep_id"`
Pool types.String `tfsdk:"pool"`
@@ -32,6 +33,7 @@ type ResourceComputeModel struct {
PortForwarding types.Set `tfsdk:"port_forwarding"`
UserAccess types.Set `tfsdk:"user_access"`
Snapshot types.Set `tfsdk:"snapshot"`
PCIDevices types.Set `tfsdk:"pci_devices"`
Rollback types.Object `tfsdk:"rollback"`
CD types.Object `tfsdk:"cd"`
PinToStack types.Bool `tfsdk:"pin_to_stack"`
@@ -124,6 +126,7 @@ type ItemResourceDiskModel struct {
ACL types.String `tfsdk:"acl"`
AccountID types.Int64 `tfsdk:"account_id"`
BootPartition types.Int64 `tfsdk:"boot_partition"`
BusNumber types.Int64 `tfsdk:"bus_number"`
CreatedTime types.Int64 `tfsdk:"created_time"`
DeletedTime types.Int64 `tfsdk:"deleted_time"`
Description types.String `tfsdk:"desc"`
@@ -175,28 +178,33 @@ type ItemNetworkModel struct {
NetId types.Int64 `tfsdk:"net_id"`
IpAddress types.String `tfsdk:"ip_address"`
Mac types.String `tfsdk:"mac"`
Weight types.Int64 `tfsdk:"weight"`
MTU types.Int64 `tfsdk:"mtu"`
}
type ItemResourceInterfacesModel struct {
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
GetGW types.String `tfsdk:"get_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS types.Object `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
BusNumber types.Int64 `tfsdk:"bus_number"`
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
GetGW types.String `tfsdk:"get_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flip_group_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
MTU types.Int64 `tfsdk:"mtu"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"netmask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS types.Object `tfsdk:"qos"`
LibvirtSettings types.Object `tfsdk:"libvirt_settings"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
type ItemResourceRulesModel struct {
@@ -212,6 +220,8 @@ var ItemNetwork = map[string]attr.Type{
"net_id": types.Int64Type,
"ip_address": types.StringType,
"mac": types.StringType,
"weight": types.Int64Type,
"mtu": types.Int64Type,
}
var ItemDisk = map[string]attr.Type{
@@ -219,6 +229,7 @@ var ItemDisk = map[string]attr.Type{
"acl": types.StringType,
"account_id": types.Int64Type,
"boot_partition": types.Int64Type,
"bus_number": types.Int64Type,
"created_time": types.Int64Type,
"deleted_time": types.Int64Type,
"desc": types.StringType,
@@ -293,25 +304,28 @@ var ItemACL = map[string]attr.Type{
}
var ItemInterfaces = map[string]attr.Type{
"conn_id": types.Int64Type,
"conn_type": types.StringType,
"get_gw": types.StringType,
"enabled": types.BoolType,
"flip_group_id": types.Int64Type,
"guid": types.StringType,
"ip_address": types.StringType,
"listen_ssh": types.BoolType,
"mac": types.StringType,
"name": types.StringType,
"net_id": types.Int64Type,
"netmask": types.Int64Type,
"net_type": types.StringType,
"node_id": types.Int64Type,
"pci_slot": types.Int64Type,
"qos": types.ObjectType{AttrTypes: ItemQos},
"target": types.StringType,
"type": types.StringType,
"vnfs": types.ListType{ElemType: types.Int64Type},
"bus_number": types.Int64Type,
"conn_id": types.Int64Type,
"conn_type": types.StringType,
"get_gw": types.StringType,
"enabled": types.BoolType,
"flip_group_id": types.Int64Type,
"guid": types.StringType,
"ip_address": types.StringType,
"listen_ssh": types.BoolType,
"mac": types.StringType,
"mtu": types.Int64Type,
"name": types.StringType,
"net_id": types.Int64Type,
"netmask": types.Int64Type,
"net_type": types.StringType,
"node_id": types.Int64Type,
"pci_slot": types.Int64Type,
"qos": types.ObjectType{AttrTypes: ItemQos},
"libvirt_settings": types.ObjectType{AttrTypes: ItemLibvirtSettings},
"target": types.StringType,
"type": types.StringType,
"vnfs": types.ListType{ElemType: types.Int64Type},
}
var ItemQos = map[string]attr.Type{
@@ -321,6 +335,16 @@ var ItemQos = map[string]attr.Type{
"in_rate": types.Int64Type,
}
var ItemLibvirtSettings = map[string]attr.Type{
"guid": types.StringType,
"txmode": types.StringType,
"ioeventfd": types.StringType,
"event_idx": types.StringType,
"queues": types.Int64Type,
"rx_queue_size": types.Int64Type,
"tx_queue_size": types.Int64Type,
}
var ItemOSUsers = map[string]attr.Type{
"guid": types.StringType,
"login": types.StringType,

View File

@@ -142,6 +142,11 @@ func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest
resp.Diagnostics.Append(utilities.ComputeResourceCDInsert(ctx, &plan, r.client)...)
}
// attach PCI devices to compute if needed, warnings added to resp.Diagnostics in case of failure.
if !plan.PCIDevices.IsNull() {
resp.Diagnostics.Append(utilities.ComputeResourcePCIDevice(ctx, &plan, r.client)...)
}
// pin to stack if needed, warnings added to resp.Diagnostics in case of failure.
if !plan.PinToStack.IsNull() && plan.PinToStack.ValueBool() {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...)
@@ -319,9 +324,18 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest
}
}
// PCI device(s) update if needed
if !plan.PCIDevices.Equal(state.PCIDevices) {
resp.Diagnostics.Append(utilities.ComputeResourcePCIDeviceUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update PCI device(s) list")
return
}
}
// Compute parameters update if needed
if (!plan.Description.IsUnknown() && !plan.Description.Equal(state.Description)) || !plan.Name.Equal(state.Name) ||
!plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) {
!plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) || (!plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset)) {
resp.Diagnostics.Append(utilities.ComputeResourceComputeUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update compute parameters")
@@ -494,25 +508,11 @@ func (r *resourceCompute) Delete(ctx context.Context, req resource.DeleteRequest
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
var permanently bool
if state.Permanently.IsNull() {
permanently = true
} else {
permanently = state.Permanently.ValueBool()
}
var detach bool
if state.DetachDisks.IsNull() {
detach = true
} else {
detach = state.DetachDisks.ValueBool()
}
// Delete existing Compute
delReq := compute.DeleteRequest{
ComputeID: uint64(state.ComputeId.ValueInt64()),
Permanently: permanently,
DetachDisks: detach,
Permanently: state.Permanently.ValueBool(),
DetachDisks: state.DetachDisks.ValueBool(),
}
tflog.Info(ctx, "Delete resourceCompute: calling CloudAPI().Compute().Delete", map[string]any{

View File

@@ -160,6 +160,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
"arch": schema.StringAttribute{
Computed: true,
},
"chipset": schema.StringAttribute{
Computed: true,
},
"boot_order": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
@@ -223,6 +226,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"bus_number": schema.Int64Attribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},
@@ -447,6 +453,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -468,12 +477,41 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
"ip_address": schema.StringAttribute{
Computed: true,
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
},
},
"listen_ssh": schema.BoolAttribute{
Computed: true,
},
"mac": schema.StringAttribute{
Computed: true,
},
"mtu": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
@@ -583,6 +621,10 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
},
},
},
"pci_devices": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"pinned": schema.BoolAttribute{
Computed: true,
},

View File

@@ -166,6 +166,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
"arch": schema.StringAttribute{
Computed: true,
},
"chipset": schema.StringAttribute{
Computed: true,
},
"boot_order": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
@@ -217,6 +220,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
@@ -248,6 +254,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -310,6 +319,32 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
},
},
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
},
},
"target": schema.StringAttribute{
Computed: true,
},

View File

@@ -158,6 +158,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
"arch": schema.StringAttribute{
Computed: true,
},
"chipset": schema.StringAttribute{
Computed: true,
},
"boot_order": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
@@ -209,6 +212,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"disk_id": schema.Int64Attribute{
Computed: true,
},
@@ -240,6 +246,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -261,6 +270,32 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
"ip_address": schema.StringAttribute{
Computed: true,
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
},
},
"listen_ssh": schema.BoolAttribute{
Computed: true,
},

View File

@@ -5,6 +5,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
@@ -30,7 +31,7 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
"driver": schema.StringAttribute{
Required: true,
Validators: []validator.String{
stringvalidator.OneOf("SVA_KVM_X86", "KVM_X86", "KVM_PPC"),
stringvalidator.OneOf("SVA_KVM_X86", "KVM_X86"),
},
Description: "Hardware architecture of this compute instance.",
},
@@ -180,7 +181,7 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
"net_type": schema.StringAttribute{
Required: true,
Validators: []validator.String{
stringvalidator.OneOf("EXTNET", "VINS", "VFNIC"),
stringvalidator.OneOf("EXTNET", "VINS", "VFNIC", "DPDK"),
},
Description: "Type of the network for this connection, either EXTNET or VINS.",
},
@@ -197,6 +198,19 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
Computed: true,
Description: "MAC address associated with this connection. MAC address is assigned automatically.",
},
"weight": schema.Int64Attribute{
Optional: true,
Computed: true,
Description: "Weight the network if you need to sort network list, the smallest attach first. zero or null weight attach last",
},
"mtu": schema.Int64Attribute{
Optional: true,
Computed: true,
Validators: []validator.Int64{
int64validator.Between(1, 9216),
},
Description: "Maximum transmission unit, used only for DPDK type, must be 1-9216",
},
},
},
},
@@ -289,6 +303,8 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
},
"enabled": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
Description: "If true - enable compute, else - disable",
},
"pause": schema.BoolAttribute{
@@ -301,6 +317,8 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
},
"restore": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
//Default: true,
},
"auto_start": schema.BoolAttribute{
@@ -328,15 +346,21 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
},
"started": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
//Default: true,
Description: "Is compute started.",
},
"detach_disks": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
//Default: true,
},
"permanently": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
//Default: true,
},
"is": schema.StringAttribute{
@@ -365,6 +389,19 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
//Default: false,
Description: "Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node.",
},
"pci_devices": schema.SetAttribute{
Optional: true,
ElementType: types.Int64Type,
Description: "ID of the connected pci devices",
},
"chipset": schema.StringAttribute{
Optional: true,
Computed: true,
Validators: []validator.String{
stringvalidator.OneOf("i440fx", "Q35"),
},
Description: "Type of the emulated system, Q35 or i440fx",
},
// computed attributes
"compute_id": schema.Int64Attribute{
@@ -528,6 +565,9 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -555,6 +595,9 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
"mac": schema.StringAttribute{
Computed: true,
},
"mtu": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},
@@ -590,6 +633,32 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
},
},
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"guid": schema.StringAttribute{
Computed: true,
},
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
},
},
"target": schema.StringAttribute{
Computed: true,
},
@@ -749,6 +818,9 @@ func MakeSchemaResourceComputeDisks() map[string]schema.Attribute {
"boot_partition": schema.Int64Attribute{
Computed: true,
},
"bus_number": schema.Int64Attribute{
Computed: true,
},
"created_time": schema.Int64Attribute{
Computed: true,
},

View File

@@ -26,3 +26,21 @@ func ComputeCheckPresence(ctx context.Context, state *models.RecordComputeModel,
tflog.Info(ctx, "Getting compute info, successfully")
return computeRecord, nil
}
func ComputePCIDevicesListCheckPresence(ctx context.Context, state *models.RecordComputeModel, c *decort.DecortClient) (*compute.ListPCIDevices, error) {
tflog.Info(ctx, "Get PCI devices info")
req := compute.ListPCIDeviceRequest{
ComputeID: uint64(state.ComputeId.ValueInt64()),
}
tflog.Info(ctx, "Check req", map[string]any{"req": req})
pciDevicesList, err := c.CloudAPI().Compute().ListPCIDevice(ctx, req)
if err != nil {
return nil, err
}
tflog.Info(ctx, "Getting PCI devices info, successfully")
return pciDevicesList, nil
}

View File

@@ -3,6 +3,7 @@ package utilities
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
@@ -11,7 +12,6 @@ import (
"github.com/hashicorp/terraform-plugin-log/tflog"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
@@ -81,42 +81,50 @@ func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeMode
diags := diag.Diagnostics{}
createReqX86 := kvmx86.CreateRequest{Start: false}
createReqPPC := kvmppc.CreateRequest{Start: false}
if !plan.Description.IsUnknown() {
createReqPPC.Description = plan.Description.ValueString()
createReqX86.Description = plan.Description.ValueString()
}
if !plan.SepId.IsUnknown() {
createReqPPC.SEPID = uint64(plan.SepId.ValueInt64())
createReqX86.SepID = uint64(plan.SepId.ValueInt64())
}
if !plan.Pool.IsUnknown() {
createReqPPC.Pool = plan.Pool.ValueString()
createReqX86.Pool = plan.Pool.ValueString()
}
if !plan.IpaType.IsNull() {
createReqPPC.IPAType = plan.IpaType.ValueString()
createReqX86.IPAType = plan.IpaType.ValueString()
}
if !plan.BootDiskSize.IsNull() {
createReqPPC.BootDisk = uint64(plan.BootDiskSize.ValueInt64())
createReqX86.BootDisk = uint64(plan.BootDiskSize.ValueInt64())
}
if !plan.IS.IsNull() {
createReqPPC.IS = plan.IS.ValueString()
createReqX86.IS = plan.IS.ValueString()
}
if !plan.Chipset.IsUnknown() {
createReqX86.Chipset = plan.Chipset.ValueString()
}
createReqX86.Interfaces = make([]kvmx86.Interface, 0)
if !plan.Network.IsNull() {
networkList := plan.Network.Elements()
sort.Slice(networkList, func(i, j int) bool {
weightI := networkList[i].(types.Object).Attributes()["weight"].(types.Int64).ValueInt64()
weightJ := networkList[j].(types.Object).Attributes()["weight"].(types.Int64).ValueInt64()
if weightI == 0 {
return false
}
if weightJ == 0 {
return true
}
return weightI < weightJ
})
interfaces := make([]kvmx86.Interface, 0)
for _, elem := range networkList {
objVal := elem.(types.Object)
@@ -125,108 +133,72 @@ func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeMode
NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()),
NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()),
}
if reqInterface.NetType == "DPDK" {
reqInterface.MTU = uint64(elemMap["mtu"].(types.Int64).ValueInt64())
}
ipaddr, ipSet := elemMap["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(types.String).ValueString()
}
interfaces = append(interfaces, reqInterface)
}
createReqX86.Interfaces = interfaces
}
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if !plan.Network.IsNull() {
networkList := plan.Network.Elements()
interfaces := make([]kvmppc.Interface, 0)
for _, elem := range networkList {
objVal := elem.(types.Object)
elemMap := objVal.Attributes()
reqInterface := kvmppc.Interface{
NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()),
NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()),
}
ipaddr, ipSet := elemMap["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(types.String).ValueString()
}
interfaces = append(interfaces, reqInterface)
}
createReqPPC.Interfaces = interfaces
}
if !plan.CloudInit.IsNull() {
userData := plan.CloudInit.ValueString()
if userData != "" && userData != "applied" {
createReqPPC.Userdata = strings.TrimSpace(userData)
createReqX86.Userdata = strings.TrimSpace(userData)
}
}
driver := strings.ToUpper(plan.Driver.ValueString())
if driver == "KVM_PPC" {
createReqPPC.RGID = uint64(plan.RGID.ValueInt64())
createReqPPC.Name = plan.Name.ValueString()
createReqPPC.CPU = uint64(plan.CPU.ValueInt64())
createReqPPC.RAM = uint64(plan.RAM.ValueInt64())
createReqPPC.ImageID = uint64(plan.ImageID.ValueInt64())
tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM PowerPC"))
id, err := c.CloudAPI().KVMPPC().Create(ctx, createReqPPC)
if err != nil {
diags.AddError("CreateResourceCompute: unable to create KVM VP PowerPC", err.Error())
return 0, diags
}
return id, diags
} else {
createReqX86.RGID = uint64(plan.RGID.ValueInt64())
createReqX86.Name = plan.Name.ValueString()
createReqX86.CPU = uint64(plan.CPU.ValueInt64())
createReqX86.RAM = uint64(plan.RAM.ValueInt64())
createReqX86.Driver = driver
createReqX86.RGID = uint64(plan.RGID.ValueInt64())
createReqX86.Name = plan.Name.ValueString()
createReqX86.CPU = uint64(plan.CPU.ValueInt64())
createReqX86.RAM = uint64(plan.RAM.ValueInt64())
createReqX86.Driver = driver
if !plan.ImageID.IsNull() {
createReqX86.ImageID = uint64(plan.ImageID.ValueInt64())
}
if !plan.WithoutBootDisk.IsNull() {
createReqX86.WithoutBootDisk = plan.WithoutBootDisk.ValueBool()
}
if !plan.CustomFields.IsUnknown() { //CustomFields optional && computed
val := plan.CustomFields.ValueString()
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomFields = val
}
if !plan.NumaAffinity.IsNull() {
createReqX86.NumaAffinity = strings.ToLower(plan.NumaAffinity.ValueString())
}
if !plan.CPUPin.IsNull() && plan.CPUPin.ValueBool() {
createReqX86.CPUPin = true
}
if !plan.HPBacked.IsNull() && plan.HPBacked.ValueBool() {
createReqX86.HPBacked = true
}
tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM x86"))
id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
if err != nil {
diags.AddError("CreateResourceCompute: unable to create KVM VP x86", err.Error())
return 0, diags
}
return id, diags
if !plan.ImageID.IsNull() {
createReqX86.ImageID = uint64(plan.ImageID.ValueInt64())
}
if !plan.WithoutBootDisk.IsNull() {
createReqX86.WithoutBootDisk = plan.WithoutBootDisk.ValueBool()
}
if !plan.CustomFields.IsUnknown() { //CustomFields optional && computed
val := plan.CustomFields.ValueString()
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomFields = val
}
if !plan.NumaAffinity.IsNull() {
createReqX86.NumaAffinity = strings.ToLower(plan.NumaAffinity.ValueString())
}
if !plan.CPUPin.IsNull() && plan.CPUPin.ValueBool() {
createReqX86.CPUPin = true
}
if !plan.HPBacked.IsNull() && plan.HPBacked.ValueBool() {
createReqX86.HPBacked = true
}
tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM x86"))
id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
if err != nil {
diags.AddError("CreateResourceCompute: unable to create KVM VP x86", err.Error())
return 0, diags
}
return id, diags
}
func CleanupResourceCompute(ctx context.Context, computeId uint64, c *decort.DecortClient) {
@@ -281,16 +253,11 @@ func ComputeResourceEnableDisable(ctx context.Context, plan *models.ResourceComp
return diags
}
var enable bool
if plan.Enabled.IsNull() {
enable = true // default value
} else {
enable = plan.Enabled.ValueBool()
}
enable := plan.Enabled.ValueBool()
tflog.Info(ctx, "ComputeEnableDisable: compute to be enabled/disabled", map[string]any{
"compute_id": computeId,
"enable": enable})
"enable": plan.Enabled.ValueBool()})
if enable {
_, err = c.CloudAPI().Compute().Enable(ctx, compute.EnableRequest{ComputeID: computeId})
@@ -326,12 +293,7 @@ func ComputeResourceStartStop(ctx context.Context, plan *models.ResourceComputeM
return diags
}
var started bool
if plan.Started.IsNull() {
started = true // default value
} else {
started = plan.Started.ValueBool()
}
started := plan.Started.ValueBool()
tflog.Info(ctx, "ComputeStartStop: compute to be started/stopped", map[string]any{
"compute_id": computeId,
@@ -642,6 +604,41 @@ func ComputeResourceSnapshot(ctx context.Context, plan *models.ResourceComputeMo
return nil
}
func ComputeResourcePCIDevice(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourcePCIDevice: Start added PCI devices(s) to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
if err != nil {
diags.AddError("ComputeResourcePCIDevice: cannot parsed ID compute from plan", err.Error())
return diags
}
pciDeviceList := plan.PCIDevices.Elements()
for _, pciDevice := range pciDeviceList {
pciDeviceId := pciDevice.(types.Int64).ValueInt64()
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(pciDeviceId),
}
tflog.Info(ctx, "ComputeResourcePCIDevice: Start attach PCI device to compute with ID", map[string]any{"compute_id": plan.ID.ValueString(), "pci_device_id": pciDeviceId})
res, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req)
tflog.Info(ctx, "ComputeResourceSnapshot: response from CloudAPI().Compute().AttachPCIDevice", map[string]any{"compute_id": computeId, "response": res})
if err != nil {
diags.AddWarning(
"ComputeResourceSnapshot: Unable to add PCI device for Compute",
err.Error(),
)
}
}
if diags.WarningsCount() != 0 {
return diags
}
tflog.Info(ctx, "ComputeResourcePCIDevice: PCI devices(s) is successfully added", map[string]any{"compute_id": computeId})
return nil
}
func ComputeResourceCDInsert(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourceCDInsert: Start added cd to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
@@ -745,7 +742,7 @@ func ComputeReadStatus(ctx context.Context, state *models.ResourceComputeModel,
"compute_id": computeId,
"status": recordCompute.Status})
// restore and enable compute in case it is required
if state.Restore.IsNull() || state.Restore.ValueBool() { // default true or user set-up true
if state.Restore.ValueBool() { // default true or user set-up true
diags.Append(RestoreCompute(ctx, computeId, c)...)
if diags.HasError() {
tflog.Error(ctx, "ComputeReadStatus: cannot restore compute")
@@ -759,14 +756,14 @@ func ComputeReadStatus(ctx context.Context, state *models.ResourceComputeModel,
return diags
}
}
if state.Enabled.IsNull() || state.Enabled.ValueBool() { // default true or user set-up true
if state.Enabled.ValueBool() { // default true or user set-up true
diags.Append(ComputeResourceEnableDisable(ctx, state, c)...)
if diags.HasError() {
tflog.Error(ctx, "ComputeReadStatus: Unable to enable compute")
return diags
}
tflog.Info(ctx, "ComputeReadStatus: compute enabled successfully", map[string]any{"compute_id": computeId})
if state.Started.IsNull() || state.Started.ValueBool() {
if state.Started.ValueBool() {
diags.Append(ComputeResourceStartStop(ctx, state, c)...)
if diags.HasError() {
tflog.Error(ctx, "ComputeReadStatus: Unable to start compute")

View File

@@ -1,6 +1,9 @@
package utilities
import "github.com/hashicorp/terraform-plugin-framework/types"
import (
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
)
// differenceSimpleType returns lists added and removed values
func differenceSimpleType(oldSet, newSet types.Set) (added, removed []any) {
@@ -30,36 +33,52 @@ func differenceSimpleType(oldSet, newSet types.Set) (added, removed []any) {
return
}
func differenceNetwork(oldSet, newSet types.Set) (added, removed []any) {
func differenceNetwork(oldSet, newSet types.Set) (added, changeIp, removed []map[string]attr.Value) {
oldSlice := oldSet.Elements()
newSlice := newSet.Elements()
foundIdx := make([]bool, len(oldSlice))
for _, newElem := range newSlice {
newObj := newElem.(types.Object)
newElemMap := newObj.Attributes()
added = make([]map[string]attr.Value, 0)
changeIp = make([]map[string]attr.Value, 0)
removed = make([]map[string]attr.Value, 0)
for _, oldNetwork := range oldSlice {
oldMap := oldNetwork.(types.Object).Attributes()
found := false
for i, oldElem := range oldSlice {
oldObj := oldElem.(types.Object)
oldElemMap := oldObj.Attributes()
if oldElemMap["net_type"] == newElemMap["net_type"] && oldElemMap["net_id"] == newElemMap["net_id"] {
ipaddr, ipSet := newElemMap["ip_address"]
if !ipSet || ipaddr.(types.String).ValueString() == "" || ipaddr == oldElemMap["ip_address"] {
for _, newNetwork := range newSlice {
newMap := newNetwork.(types.Object).Attributes()
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] && (newMap["mtu"] == oldMap["mtu"] || newMap["mtu"].(types.Int64).ValueInt64() == 0) {
if (newMap["net_type"].(types.String).ValueString() == "EXTNET" || newMap["net_type"].(types.String).ValueString() == "VINS") && (newMap["ip_address"] != oldMap["ip_address"] && newMap["ip_address"].(types.String).ValueString() != "") {
changeIp = append(changeIp, newMap)
found = true
break
} else if newMap["ip_address"] == oldMap["ip_address"] || newMap["ip_address"].(types.String).ValueString() != "" {
found = true
foundIdx[i] = true
break
}
}
}
if !found {
added = append(added, newElem)
if found {
continue
}
removed = append(removed, oldMap)
}
for i, found := range foundIdx {
if !found {
removed = append(removed, oldSlice[i])
for _, newNetwork := range newSlice {
newMap := newNetwork.(types.Object).Attributes()
found := false
for _, oldNetwork := range oldSlice {
oldMap := oldNetwork.(types.Object).Attributes()
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] && (newMap["mtu"] == oldMap["mtu"] || newMap["mtu"].(types.Int64).ValueInt64() == 0) {
if newMap["ip_address"] == oldMap["ip_address"] || newMap["ip_address"].(types.String).ValueString() != "" || ((newMap["net_type"].(types.String).ValueString() == "EXTNET" || newMap["net_type"].(types.String).ValueString() == "VINS") && newMap["ip_address"] != oldMap["ip_address"]) {
found = true
break
}
}
}
if found {
continue
}
added = append(added, newMap)
}
return

View File

@@ -3,6 +3,7 @@ package utilities
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
@@ -169,7 +170,7 @@ func ComputeResourceExtraDiskUpdate(ctx context.Context, state *models.ResourceC
}
}
if len(detachSet) > 0 && (plan.Started.ValueBool() || plan.Started.IsNull()) {
if len(detachSet) > 0 && plan.Started.ValueBool() {
diags = ComputeResourceStartStop(ctx, plan, c)
}
@@ -182,6 +183,55 @@ func ComputeResourceExtraDiskUpdate(ctx context.Context, state *models.ResourceC
return nil
}
func ComputeResourcePCIDeviceUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: start update PCI device(s) list to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
if err != nil {
diags.AddError("ComputeResourcePCIDeviceUpdate: cannot parsed ID compute from state", err.Error())
return diags
}
attachSet, detachSet := differenceSimpleType(state.ExtraDisks, plan.ExtraDisks)
for _, pciDevice := range detachSet {
pciDeviceId := pciDevice.(types.Int64).ValueInt64()
tflog.Info(ctx, fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Start detach PCI device with ID - %d from compute with ID - %d", pciDeviceId, computeId))
req := compute.DetachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(pciDeviceId),
}
res, err := c.CloudAPI().Compute().DetachPCIDevice(ctx, req)
tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: response from CloudAPI().Compute().DetachPCIDevice", map[string]any{"compute_id": plan.ID.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Cannot detach PCI device with ID - %d", pciDeviceId), err.Error())
}
}
for _, pciDevice := range attachSet {
pciDeviceId := pciDevice.(types.Int64).ValueInt64()
tflog.Info(ctx, fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Start attach PCI device with ID - %d to compute with ID - %d", pciDeviceId, computeId))
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(pciDeviceId),
}
res, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req)
tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: response from CloudAPI().Compute().AttachPCIDevice", map[string]any{"compute_id": plan.ID.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Cannot attach PCI device with ID - %d", pciDeviceId), err.Error())
}
}
if diags.HasError() {
tflog.Error(ctx, "ComputeResourcePCIDeviceUpdate: Errors occurred while managing PCI device(s)")
return diags
}
tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: PCI device(s) is successfully update", map[string]any{"compute_id": computeId})
return nil
}
func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourceNetworkUpdate: start update network rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
@@ -192,22 +242,43 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
return diags
}
attachSet, detachSet := differenceNetwork(state.Network, plan.Network)
attachMap, changeIpMap, detachMap := differenceNetwork(state.Network, plan.Network)
for _, network := range detachSet {
objVal := network.(types.Object)
elemMap := objVal.Attributes()
tflog.Info(ctx, "ComputeResourceNetworkUpdate: start detach network(s) rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
for _, network := range detachMap {
req := compute.NetDetachRequest{
ComputeID: computeId,
IPAddr: elemMap["ip_address"].(types.String).ValueString(),
MAC: elemMap["mac"].(types.String).ValueString(),
IPAddr: network["ip_address"].(types.String).ValueString(),
MAC: network["mac"].(types.String).ValueString(),
}
tflog.Info(ctx, "ComputeResourceNetworkUpdate: before calling CloudAPI().Compute().NetDetach", map[string]any{"compute_id": computeId, "req": req})
res, err := c.CloudAPI().Compute().NetDetach(ctx, req)
tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().NetDetach", map[string]any{"compute_id": plan.ID.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to detach net ID %d from Compute ID %d",
elemMap["net_id"].(types.Int64).ValueInt64(), computeId), err.Error())
network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error())
}
}
if diags.HasError() {
return diags
}
tflog.Info(ctx, "ComputeResourceNetworkUpdate: start change IP network(s) rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()})
for _, network := range changeIpMap {
req := compute.ChangeIPRequest{
ComputeID: computeId,
NetType: network["net_type"].(types.String).ValueString(),
NetID: uint64(network["net_id"].(types.Int64).ValueInt64()),
IPAddr: network["ip_address"].(types.String).ValueString(),
}
tflog.Info(ctx, "ComputeResourceNetworkUpdate: before calling CloudAPI().Compute().ChangeIP", map[string]any{"compute_id": computeId, "req": req})
res, err := c.CloudAPI().Compute().ChangeIP(ctx, req)
tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().ChangeIP", map[string]any{"compute_id": plan.ID.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to change IP net ID %d from Compute ID %d",
network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error())
}
}
@@ -217,7 +288,7 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
needStart := false
// need stop to attach first network
if (len(detachSet) == len(state.Network.Elements()) || len(state.Network.Elements()) < 1) && len(attachSet) > 0 {
if len(detachMap) == len(state.Network.Elements()) || (len(state.Network.Elements()) < 1) && len(attachMap) > 0 || hasDPDKnetwork(attachMap) {
tflog.Info(ctx, "ComputeResourceNetworkUpdate: stop compute", map[string]any{"compute_id": computeId})
_, err = c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId})
if err != nil {
@@ -226,18 +297,33 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
)
return diags
}
needStart = true
if plan.Started.ValueBool() {
needStart = true
}
}
for _, network := range attachSet {
objVal := network.(types.Object)
elemMap := objVal.Attributes()
sort.Slice(attachMap, func(i, j int) bool {
weightI := attachMap[i]["weight"].(types.Int64).ValueInt64()
weightJ := attachMap[j]["weight"].(types.Int64).ValueInt64()
if weightI == 0 {
return false
}
if weightJ == 0 {
return true
}
return weightI < weightJ
})
for _, network := range attachMap {
req := compute.NetAttachRequest{
ComputeID: computeId,
NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()),
NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()),
NetType: strings.ToUpper(network["net_type"].(types.String).ValueString()),
NetID: uint64(network["net_id"].(types.Int64).ValueInt64()),
}
ipaddr, ipSet := elemMap["ip_address"]
if req.NetType == "DPDK" {
req.MTU = uint64(network["mtu"].(types.Int64).ValueInt64())
}
ipaddr, ipSet := network["ip_address"]
if ipSet {
req.IPAddr = ipaddr.(types.String).ValueString()
}
@@ -246,11 +332,11 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().NetAttach", map[string]any{"compute_id": plan.ID.ValueString(), "response": res})
if err != nil {
diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to attach net ID %d from Compute ID %d",
elemMap["net_id"].(types.Int64).ValueInt64(), computeId), err.Error())
network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error())
}
}
if needStart && (plan.Started.ValueBool() || plan.Started.IsNull()) {
if needStart {
diags = ComputeResourceStartStop(ctx, plan, c)
}
@@ -263,6 +349,15 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
return nil
}
func hasDPDKnetwork(networkAttachMap []map[string]attr.Value) bool {
for _, elem := range networkAttachMap {
if elem["net_type"].(types.String).ValueString() == "DPDK" {
return true
}
}
return false
}
func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourceComputeUpdate: start update compute parameters", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
@@ -301,6 +396,10 @@ func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceCom
req.HPBacked = state.HPBacked.ValueBool()
}
if !plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset) {
req.Chipset = plan.Chipset.ValueString()
}
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
// If STARTED, we need to stop it before update
@@ -328,7 +427,7 @@ func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceCom
return diags
}
if isStopRequred && (plan.Started.ValueBool() || plan.Started.IsNull()) {
if isStopRequred && plan.Started.ValueBool() {
diags = ComputeResourceStartStop(ctx, plan, c)
}
@@ -889,7 +988,7 @@ func ComputeResourceRollback(ctx context.Context, plan *models.ResourceComputeMo
return diags
}
if plan.Started.ValueBool() || plan.Started.IsNull() {
if plan.Started.ValueBool() {
diags = ComputeResourceStartStop(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "ComputeResourceRollback: cannot start compute")
@@ -1092,7 +1191,7 @@ func ComputeResourceRedeploy(ctx context.Context, plan *models.ResourceComputeMo
return diags
}
if plan.Started.ValueBool() || plan.Started.IsNull() {
if plan.Started.ValueBool() {
diags = ComputeResourceStartStop(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "ComputeResourceRedeploy: cannot start compute")

View File

@@ -39,6 +39,7 @@ func LBResource(ctx context.Context, plan *models.ResourceLBModel, c *decort.Dec
Timeouts: plan.Timeouts,
SysctlParams: plan.SysctlParams,
Permanently: plan.Permanently,
Restore: plan.Restore,
Restart: plan.Restart,
Enable: plan.Enable,
ConfigReset: plan.ConfigReset,

View File

@@ -83,7 +83,7 @@ func (r *resourceLB) Create(ctx context.Context, req resource.CreateRequest, res
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.
// enable or disable lb, warnings added to resp.Diagnostics in case of failure.
if !plan.Enable.IsNull() { // Enable is optional
if !plan.Enable.ValueBool() { // Enable is optional
diags := utilities.LBEnableDisable(ctx, &plan, r.client)
for _, d := range diags {
if d.Severity() == diag.SeverityError {
@@ -307,17 +307,10 @@ func (r *resourceLB) Delete(ctx context.Context, req resource.DeleteRequest, res
ctx, cancel := context.WithTimeout(ctx, readTimeout)
defer cancel()
var permanently bool
if state.Permanently.IsNull() {
permanently = true
} else {
permanently = state.Permanently.ValueBool()
}
// Delete existing lb
delReq := lb.DeleteRequest{
LBID: uint64(state.LBID.ValueInt64()),
Permanently: permanently,
Permanently: state.Permanently.ValueBool(),
}
tflog.Info(ctx, "Delete resourceLB: calling CloudAPI().LB().Delete", map[string]any{

View File

@@ -2,6 +2,7 @@ package schemas
import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)
@@ -36,6 +37,8 @@ func MakeSchemaResourceLB() map[string]schema.Attribute {
},
"enable": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
},
"restart": schema.BoolAttribute{
Optional: true,
@@ -45,9 +48,13 @@ func MakeSchemaResourceLB() map[string]schema.Attribute {
},
"permanently": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
},
"restore": schema.BoolAttribute{
Optional: true,
Computed: true,
Default: booldefault.StaticBool(true),
},
"safe": schema.BoolAttribute{
Optional: true,

View File

@@ -91,7 +91,7 @@ func LBEnableDisable(ctx context.Context, plan *models.ResourceLBModel, c *decor
diags.AddError("Cannot parsed ID lb from state", err.Error())
return diags
}
if plan.Enable.IsNull() || plan.Enable.ValueBool() {
if plan.Enable.ValueBool() {
tflog.Info(ctx, "Enable lb with ID", map[string]any{"lb_id": plan.ID.ValueString()})
_, err := c.CloudAPI().LB().Enable(ctx, lb.DisableEnableRequest{LBID: lbId})
if err != nil {
@@ -131,7 +131,7 @@ func LBReadStatus(ctx context.Context, plan *models.ResourceLBModel, c *decort.D
diags.AddError("Error:", fmt.Sprintf("The lb is in status: %s, please, contact support for more information", lbItem.Status))
return diags
case status.Deleted:
if plan.Restore.ValueBool() || plan.Restore.IsNull() {
if plan.Restore.ValueBool() {
diags = LBRestore(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "Error restore lb", map[string]any{"lb_id": plan.ID.ValueString()})
@@ -141,7 +141,7 @@ func LBReadStatus(ctx context.Context, plan *models.ResourceLBModel, c *decort.D
diags.AddError("LB in status Deleted:", "please clean state, or restore lb")
return diags
}
if plan.Enable.ValueBool() || plan.Enable.IsNull() {
if plan.Enable.ValueBool() {
diags = LBEnableDisable(ctx, plan, c)
if diags.HasError() {
tflog.Error(ctx, "Error enable/disable lb", map[string]any{"lb_id": plan.ID.ValueString()})
@@ -196,7 +196,7 @@ func LBStartStop(ctx context.Context, plan *models.ResourceLBModel, c *decort.De
diags.AddError("Cannot parsed ID lb from state", err.Error())
return diags
}
if plan.Enable.IsNull() || plan.Enable.ValueBool() {
if plan.Enable.ValueBool() {
if plan.Start.ValueBool() || plan.Start.IsNull() {
tflog.Info(ctx, "Start lb with ID", map[string]any{"lb_id": plan.ID.ValueString()})
_, err := c.CloudAPI().LB().Start(ctx, lb.StartRequest{LBID: lbId})
@@ -206,7 +206,7 @@ func LBStartStop(ctx context.Context, plan *models.ResourceLBModel, c *decort.De
}
}
}
if plan.Enable.ValueBool() || plan.Enable.IsNull() {
if plan.Enable.ValueBool() {
tflog.Info(ctx, "Stop lb with ID", map[string]any{"lb_id": plan.ID.ValueString()})
if !plan.Start.ValueBool() && !plan.Start.IsNull() {
_, err := c.CloudAPI().LB().Stop(ctx, lb.StopRequest{LBID: lbId})

View File

@@ -33,8 +33,8 @@ func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decor
id := uuid.New()
*state = models.DataSourceRGModel{
RGID: state.RGID,
Reason: state.Reason,
RGID: state.RGID,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),

View File

@@ -31,7 +31,6 @@ func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel
id := uuid.New()
*state = models.DataSourceRGUsageModel{
RGID: state.RGID,
Reason: state.Reason,
Timeouts: state.Timeouts,
Id: types.StringValue(id.String()),

View File

@@ -52,7 +52,6 @@ func RGResource(ctx context.Context, plan *models.ResourceRGModel, c *decort.Dec
Description: plan.Description,
Force: plan.Force,
Permanently: plan.Permanently,
Reason: plan.Reason,
RegisterComputes: plan.RegisterComputes,
Restore: plan.Restore,
Enable: plan.Enable,

View File

@@ -8,7 +8,6 @@ import (
type DataSourceRGModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields

View File

@@ -8,7 +8,6 @@ import (
type DataSourceRGUsageModel struct {
// request fields
RGID types.Int64 `tfsdk:"rg_id"`
Reason types.String `tfsdk:"reason"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
// response fields

View File

@@ -24,7 +24,6 @@ type ResourceRGModel struct {
Description types.String `tfsdk:"description"`
Force types.Bool `tfsdk:"force"`
Permanently types.Bool `tfsdk:"permanently"`
Reason types.String `tfsdk:"reason"`
RegisterComputes types.Bool `tfsdk:"register_computes"`
Restore types.Bool `tfsdk:"restore"`
Enable types.Bool `tfsdk:"enable"`
@@ -76,27 +75,23 @@ type QuotaModel struct {
}
type AccessModel struct {
User types.String `tfsdk:"user"`
Right types.String `tfsdk:"right"`
Reason types.String `tfsdk:"reason"`
User types.String `tfsdk:"user"`
Right types.String `tfsdk:"right"`
}
type DefNetModel struct {
NetType types.String `tfsdk:"net_type"`
NetId types.Int64 `tfsdk:"net_id"`
Reason types.String `tfsdk:"reason"`
}
var ItemAccess = map[string]attr.Type{
"user": types.StringType,
"right": types.StringType,
"reason": types.StringType,
"user": types.StringType,
"right": types.StringType,
}
var ItemDefNet = map[string]attr.Type{
"net_type": types.StringType,
"net_id": types.Int64Type,
"reason": types.StringType,
}
var ItemACL = map[string]attr.Type{

View File

@@ -237,13 +237,6 @@ func (r *resourceRG) Update(ctx context.Context, req resource.UpdateRequest, res
// Validate if changes in plan are allowed
tflog.Info(ctx, "Update resourceRG: checking def_net is not empty in case of change", map[string]any{
"rg_id": state.Id.ValueString()})
if !state.DefNet.IsNull() && plan.DefNet.IsNull() {
resp.Diagnostics.AddError(
"Update resourceRG: Invalid input provided",
fmt.Sprintf("block def_net must not be empty for resource with rg_id %d", recordRG.ID),
)
return
}
tflog.Info(ctx, "Update resourceRG: checking def_net_type, ipcidr, ext_ip are not changed", map[string]any{
"rg_id": state.Id.ValueString(),
@@ -370,9 +363,6 @@ func (r *resourceRG) Delete(ctx context.Context, req resource.DeleteRequest, res
} else {
delReq.Permanently = state.Permanently.ValueBool()
}
if !state.Reason.IsNull() {
delReq.Reason = state.Reason.ValueString()
}
_, err := r.client.CloudAPI().RG().Delete(ctx, delReq)
if err != nil {
resp.Diagnostics.AddError("Delete resourceRG: Error deleting resource group with error: ", err.Error())

View File

@@ -13,12 +13,6 @@ func MakeSchemaDataSourceRG() map[string]schema.Attribute {
Description: "resource group id",
},
// optional attributes
"reason": schema.StringAttribute{
Optional: true,
Description: "reason for request",
},
//computed attributes
"account_id": schema.Int64Attribute{
Computed: true,

View File

@@ -12,12 +12,6 @@ func MakeSchemaDataSourceRGUsage() map[string]schema.Attribute {
Description: "find by rg id",
},
// optional attributes
"reason": schema.StringAttribute{
Optional: true,
Description: "reason for action",
},
//computed attributes
"id": schema.StringAttribute{
Computed: true,

View File

@@ -109,10 +109,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute {
Required: true,
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
},
"reason": schema.StringAttribute{
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -132,10 +128,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute {
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
// default value is 0
},
"reason": schema.StringAttribute{
Optional: true,
Description: "Reason for action",
},
},
},
"description": schema.StringAttribute{
@@ -152,10 +144,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute {
Description: "Set to True if you want force delete non-empty RG",
// default value is true
},
"reason": schema.StringAttribute{
Optional: true,
Description: "Set to True if you want force delete non-empty RG",
},
"register_computes": schema.BoolAttribute{
Optional: true,
Description: "Register computes in registration system",
@@ -171,6 +159,13 @@ func MakeSchemaResourceRG() map[string]schema.Attribute {
// default value is true
},
"uniq_pools": schema.ListAttribute{
Optional: true,
Computed: true,
ElementType: types.StringType,
Description: "List of strings with pools. Applies only when updating",
},
// computed attributes
"rg_id": schema.Int64Attribute{
Computed: true,
@@ -267,9 +262,5 @@ func MakeSchemaResourceRG() map[string]schema.Attribute {
Computed: true,
ElementType: types.StringType,
},
"uniq_pools": schema.ListAttribute{
Computed: true,
ElementType: types.StringType,
},
}
}

View File

@@ -13,10 +13,6 @@ import (
func RGUsageCheckPresence(ctx context.Context, plan *models.DataSourceRGUsageModel, c *decort.DecortClient) (*rg.RecordResourceUsage, error) {
usageReq := rg.UsageRequest{RGID: uint64(plan.RGID.ValueInt64())}
if !plan.Reason.IsNull() {
usageReq.Reason = plan.Reason.ValueString()
}
tflog.Info(ctx, "RGUsageCheckPresence: before call CloudAPI().RG().Usage", map[string]any{"response": usageReq})
usage, err := c.CloudAPI().RG().Usage(ctx, usageReq)
if err != nil {

View File

@@ -71,10 +71,8 @@ func CreateRequestResourceRG(ctx context.Context, plan *models.ResourceRGModel)
createReq.MaxNumPublicIP = -1
}
// set up defNet, owner, ipcidr, description, reason, extNetId, extIp, registerComputes optional parameters
if plan.DefNetType.IsNull() {
createReq.DefNet = "PRIVATE" // default value
} else {
// set up defNet, owner, ipcidr, description, extNetId, extIp, registerComputes optional parameters
if !plan.DefNetType.IsNull() {
createReq.DefNet = plan.DefNetType.ValueString()
}
if !plan.Owner.IsNull() {
@@ -86,9 +84,6 @@ func CreateRequestResourceRG(ctx context.Context, plan *models.ResourceRGModel)
if !plan.Description.IsNull() {
createReq.Description = plan.Description.ValueString()
}
if !plan.Reason.IsNull() {
createReq.Reason = plan.Reason.ValueString()
}
if plan.ExtNetID.IsNull() {
createReq.ExtNetID = 0 // default value 0
} else {
@@ -143,9 +138,6 @@ func RestoreRG(ctx context.Context, rgId uint64, c *decort.DecortClient) diag.Di
// EnableRG performs resource group Enable request
func EnableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error {
enableReq := rg.EnableRequest{RGID: rgId}
if !plan.Reason.IsNull() {
enableReq.Reason = plan.Reason.ValueString()
}
tflog.Info(ctx, "utilityEnableRG: before calling CloudAPI().RG().Enable", map[string]any{"rg_id": rgId, "req": enableReq})
@@ -159,10 +151,6 @@ func EnableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c
// DisableRG performs resource group Disable request
func DisableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error {
disableReq := rg.DisableRequest{RGID: rgId}
if !plan.Reason.IsNull() {
disableReq.Reason = plan.Reason.ValueString()
}
tflog.Info(ctx, "utilityDisableRG: before calling CloudAPI().RG().Disable", map[string]any{"rg_id": rgId, "req": disableReq})
res, err := c.CloudAPI().RG().Disable(ctx, disableReq)
@@ -181,9 +169,6 @@ func UpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGMo
updateReq := rg.UpdateRequest{
RGID: rgId,
}
if !state.Reason.IsNull() {
updateReq.Reason = state.Reason.ValueString()
}
if !plan.Name.Equal(state.Name) {
updateReq.Name = plan.Name.ValueString()
@@ -216,17 +201,37 @@ func UpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGMo
updateNeeded = true
}
if !plan.UniqPools.Equal(state.UniqPools) {
if plan.UniqPools.IsNull() || len(plan.UniqPools.Elements()) == 0 {
updateReq.ClearUniqPools = true
} else {
uPoolsList := make([]string, 0, len(plan.UniqPools.Elements()))
diags.Append(plan.UniqPools.ElementsAs(ctx, &uPoolsList, true)...)
if diags.HasError() {
tflog.Error(ctx, "utilityUpdateRG: cannot populate result with plan.UniqPools object element")
return diags
}
updateReq.UniqPools = uPoolsList
}
tflog.Info(ctx, "utilityUpdateRG: new register_computes specified", map[string]any{
"rg_id": plan.Id.ValueString(),
"register_computes_plan": plan.RegisterComputes.ValueBool(),
"register_computes_state": state.RegisterComputes.ValueBool()})
updateNeeded = true
}
var updQuotaNeeded bool
var quotaPlan, quotaState models.QuotaModel
if !plan.Quota.IsNull() {
diags = plan.Quota.As(ctx, &quotaPlan, basetypes.ObjectAsOptions{})
diags = plan.Quota.As(ctx, &quotaPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
if diags.HasError() {
return diags
}
}
if !state.Quota.IsNull() {
diags = state.Quota.As(ctx, &quotaState, basetypes.ObjectAsOptions{})
diags = state.Quota.As(ctx, &quotaState, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
if diags.HasError() {
return diags
}
@@ -424,9 +429,6 @@ func AccessUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Resour
RGID: rgId,
User: deletedAccessItem.User.ValueString(),
}
if !deletedAccessItem.Reason.IsNull() {
revokeReq.Reason = deletedAccessItem.Reason.ValueString()
}
tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessRevoke", map[string]any{"rg_id": plan.Id.ValueString(), "req": revokeReq})
res, err := c.CloudAPI().RG().AccessRevoke(ctx, revokeReq)
@@ -462,9 +464,6 @@ func AccessUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Resour
User: addedAccessItem.User.ValueString(),
Right: addedAccessItem.Right.ValueString(),
}
if !addedAccessItem.Reason.IsNull() {
grantReq.Reason = addedAccessItem.Reason.ValueString()
}
tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{"rg_id": plan.Id.ValueString(), "req": grantReq})
res, err := c.CloudAPI().RG().AccessGrant(ctx, grantReq)
@@ -506,9 +505,6 @@ func AccessCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGMod
User: addedAccessItem.User.ValueString(),
Right: addedAccessItem.Right.ValueString(),
}
if !addedAccessItem.Reason.IsNull() {
grantReq.Reason = addedAccessItem.Reason.ValueString()
}
tflog.Info(ctx, "AccessCreateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{
"rg_id": rgId,
@@ -568,7 +564,7 @@ func SetDefNetUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Res
setDefNetNeeded = true
}
} else if !plan.DefNet.IsNull() {
} else {
setDefNetNeeded = true
}
@@ -577,25 +573,36 @@ func SetDefNetUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Res
"rg_id": plan.Id.ValueString(),
"def_net_plan": plan.DefNet,
"def_net_state": state.DefNet})
setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString()
if itemDefNetPlan.NetId.IsNull() {
setDefNetReq.NetID = 0 // default value
} else {
setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64())
}
if !itemDefNetPlan.Reason.IsNull() {
setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString()
}
tflog.Info(ctx, "utilitySetDefNetUpdateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "req": setDefNetReq})
res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq)
if err != nil {
diags.AddError(
"SetDefNetUpdateRG: can not set defNet for rg",
err.Error())
return diags
if plan.DefNet.IsNull() {
removeReq := rg.RemoveDefNetRequest{RGID: uint64(state.RGID.ValueInt64())}
res, err := c.CloudAPI().RG().RemoveDefNet(ctx, removeReq)
if err != nil {
diags.AddError(
"SetDefNetUpdateRG: can not remove defNet for rg",
err.Error())
return diags
}
tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().RemoveDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
} else {
setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString()
if itemDefNetPlan.NetId.IsNull() {
setDefNetReq.NetID = 0 // default value
} else {
setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64())
}
tflog.Info(ctx, "utilitySetDefNetUpdateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "req": setDefNetReq})
res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq)
if err != nil {
diags.AddError(
"SetDefNetUpdateRG: can not set defNet for rg",
err.Error())
return diags
}
tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
}
tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res})
}
if !setDefNetNeeded {
@@ -637,9 +644,6 @@ func SetDefNetCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRG
} else {
setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64())
}
if !itemDefNetPlan.Reason.IsNull() {
setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString()
}
tflog.Info(ctx, "SetDefNetCreateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": rgId, "req": setDefNetReq})
res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq)

View File

@@ -189,24 +189,27 @@ func flattenInterfaces(ctx context.Context, items *vins.ListVNFInterfaces) types
tempSlice := make([]types.Object, 0, len(*items))
for _, item := range *items {
temp := models.VNFInterfaceModel{
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
DefGW: types.StringValue(item.DefGW),
Enabled: types.BoolValue(item.Enabled),
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(int64(item.PCISlot)),
QOS: flattenQOS(ctx, &item.QOS),
Target: types.StringValue(item.Target),
Type: types.StringValue(item.Type),
BusNumber: types.Int64Value(int64(item.BusNumber)),
ConnID: types.Int64Value(int64(item.ConnID)),
ConnType: types.StringValue(item.ConnType),
DefGW: types.StringValue(item.DefGW),
Enabled: types.BoolValue(item.Enabled),
FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)),
GUID: types.StringValue(item.GUID),
IPAddress: types.StringValue(item.IPAddress),
ListenSSH: types.BoolValue(item.ListenSSH),
MAC: types.StringValue(item.MAC),
MTU: types.Int64Value(int64(item.MTU)),
Libvirtsettings: flattenLibvirtSettings(ctx, &item.LibvirtSettings),
Name: types.StringValue(item.Name),
NetID: types.Int64Value(int64(item.NetID)),
NetMask: types.Int64Value(int64(item.NetMask)),
NetType: types.StringValue(item.NetType),
NodeID: types.Int64Value(int64(item.NodeID)),
PCISlot: types.Int64Value(int64(item.PCISlot)),
QOS: flattenQOS(ctx, &item.QOS),
Target: types.StringValue(item.Target),
Type: types.StringValue(item.Type),
}
temp.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VNFs)
@@ -230,6 +233,28 @@ func flattenInterfaces(ctx context.Context, items *vins.ListVNFInterfaces) types
return res
}
func flattenLibvirtSettings(ctx context.Context, settings *vins.LibvirtSettings) types.Object {
tflog.Info(ctx, "Start flattenLibvirtSettings")
temp := models.LibvirtSettingsModel{
TXMode: types.StringValue(settings.TXMode),
IOEventFD: types.StringValue(settings.IOEventFD),
EventIDx: types.StringValue(settings.EventIDx),
Queues: types.Int64Value(int64(settings.Queues)),
RXQueueSize: types.Int64Value(int64(settings.RXQueueSize)),
TXQueueSize: types.Int64Value(int64(settings.TXQueueSize)),
GUID: types.StringValue(settings.GUID),
}
res, err := types.ObjectValueFrom(ctx, models.LibvirtSettings, temp)
if err != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenLibvirtSettings struct to obj", err))
}
tflog.Info(ctx, "End flattenLibvirtSettings")
return res
}
// flattenQOS flattens QOS.
// Flatten errors are added to tflog.
func flattenQOS(ctx context.Context, qos *vins.QOS) types.Object {

View File

@@ -89,25 +89,48 @@ type RecordResourcesModel struct {
}
type VNFInterfaceModel struct {
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"`
BusNumber types.Int64 `tfsdk:"bus_number"`
ConnID types.Int64 `tfsdk:"conn_id"`
ConnType types.String `tfsdk:"conn_type"`
DefGW types.String `tfsdk:"def_gw"`
Enabled types.Bool `tfsdk:"enabled"`
FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
Libvirtsettings types.Object `tfsdk:"libvirt_settings"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
MTU types.Int64 `tfsdk:"mtu"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"net_mask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS types.Object `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
type LibvirtSettingsModel struct {
TXMode types.String `tfsdk:"txmode"`
IOEventFD types.String `tfsdk:"ioeventfd"`
EventIDx types.String `tfsdk:"event_idx"`
Queues types.Int64 `tfsdk:"queues"`
RXQueueSize types.Int64 `tfsdk:"rx_queue_size"`
TXQueueSize types.Int64 `tfsdk:"tx_queue_size"`
GUID types.String `tfsdk:"guid"`
IPAddress types.String `tfsdk:"ip_address"`
ListenSSH types.Bool `tfsdk:"listen_ssh"`
MAC types.String `tfsdk:"mac"`
Name types.String `tfsdk:"name"`
NetID types.Int64 `tfsdk:"net_id"`
NetMask types.Int64 `tfsdk:"net_mask"`
NetType types.String `tfsdk:"net_type"`
NodeID types.Int64 `tfsdk:"node_id"`
PCISlot types.Int64 `tfsdk:"pci_slot"`
QOS types.Object `tfsdk:"qos"`
Target types.String `tfsdk:"target"`
Type types.String `tfsdk:"type"`
VNFs types.List `tfsdk:"vnfs"`
}
var LibvirtSettings = map[string]attr.Type{
"txmode": types.StringType,
"ioeventfd": types.StringType,
"event_idx": types.StringType,
"queues": types.Int64Type,
"rx_queue_size": types.Int64Type,
"tx_queue_size": types.Int64Type,
"guid": types.StringType,
}
type QOSModel struct {
@@ -278,25 +301,28 @@ var ItemVNFDev = map[string]attr.Type{
}
var ItemInterface = map[string]attr.Type{
"conn_id": types.Int64Type,
"conn_type": types.StringType,
"def_gw": types.StringType,
"enabled": types.BoolType,
"flipgroup_id": types.Int64Type,
"guid": types.StringType,
"ip_address": types.StringType,
"listen_ssh": types.BoolType,
"mac": types.StringType,
"name": types.StringType,
"net_id": types.Int64Type,
"net_mask": types.Int64Type,
"net_type": types.StringType,
"node_id": types.Int64Type,
"pci_slot": types.Int64Type,
"qos": types.ObjectType{AttrTypes: ItemQOS},
"target": types.StringType,
"type": types.StringType,
"vnfs": types.ListType{ElemType: types.Int64Type},
"bus_number": types.Int64Type,
"conn_id": types.Int64Type,
"conn_type": types.StringType,
"def_gw": types.StringType,
"enabled": types.BoolType,
"flipgroup_id": types.Int64Type,
"guid": types.StringType,
"ip_address": types.StringType,
"libvirt_settings": types.ObjectType{AttrTypes: LibvirtSettings},
"listen_ssh": types.BoolType,
"mac": types.StringType,
"mtu": types.Int64Type,
"name": types.StringType,
"net_id": types.Int64Type,
"net_mask": types.Int64Type,
"net_type": types.StringType,
"node_id": types.Int64Type,
"pci_slot": types.Int64Type,
"qos": types.ObjectType{AttrTypes: ItemQOS},
"target": types.StringType,
"type": types.StringType,
"vnfs": types.ListType{ElemType: types.Int64Type},
}
var ItemQOS = map[string]attr.Type{

View File

@@ -12,6 +12,7 @@ type DataSourceVINSListModel struct {
AccountID types.Int64 `tfsdk:"account_id"`
RGID types.Int64 `tfsdk:"rg_id"`
ExtIP types.String `tfsdk:"ext_ip"`
VNFDevID types.Int64 `tfsdk:"vnf_dev_id"`
IncludeDeleted types.Bool `tfsdk:"include_deleted"`
Page types.Int64 `tfsdk:"page"`
SortBy types.String `tfsdk:"sort_by"`

View File

@@ -91,6 +91,9 @@ func MakeSchemaDataSourceVINS() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -112,12 +115,41 @@ func MakeSchemaDataSourceVINS() map[string]schema.Attribute {
"ip_address": schema.StringAttribute{
Computed: true,
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
},
},
"listen_ssh": schema.BoolAttribute{
Computed: true,
},
"mac": schema.StringAttribute{
Computed: true,
},
"mtu": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},

View File

@@ -27,6 +27,10 @@ func MakeSchemaDataSourceVINSList() map[string]schema.Attribute {
Optional: true,
Description: "Filter by external IP address",
},
"vnf_dev_id": schema.Int64Attribute{
Optional: true,
Description: "Filter by VNF Device id",
},
"include_deleted": schema.BoolAttribute{
Optional: true,
Description: "Include deleted computes",

View File

@@ -258,6 +258,9 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute {
Computed: true,
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"bus_number": schema.Int64Attribute{
Computed: true,
},
"conn_id": schema.Int64Attribute{
Computed: true,
},
@@ -279,12 +282,41 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute {
"ip_address": schema.StringAttribute{
Computed: true,
},
"libvirt_settings": schema.SingleNestedAttribute{
Computed: true,
Attributes: map[string]schema.Attribute{
"txmode": schema.StringAttribute{
Computed: true,
},
"ioeventfd": schema.StringAttribute{
Computed: true,
},
"event_idx": schema.StringAttribute{
Computed: true,
},
"queues": schema.Int64Attribute{
Computed: true,
},
"rx_queue_size": schema.Int64Attribute{
Computed: true,
},
"tx_queue_size": schema.Int64Attribute{
Computed: true,
},
"guid": schema.StringAttribute{
Computed: true,
},
},
},
"listen_ssh": schema.BoolAttribute{
Computed: true,
},
"mac": schema.StringAttribute{
Computed: true,
},
"mtu": schema.Int64Attribute{
Computed: true,
},
"name": schema.StringAttribute{
Computed: true,
},

View File

@@ -31,6 +31,9 @@ func VINSListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourc
if !plan.ExtIP.IsNull() {
listReq.ExtIP = plan.ExtIP.ValueString()
}
if !plan.VNFDevID.IsNull() {
listReq.VNFDevId = uint64(plan.VNFDevID.ValueInt64())
}
if !plan.IncludeDeleted.IsNull() {
listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool()
}

Some files were not shown because too many files have changed in this diff Show More