This commit is contained in:
2026-02-11 13:02:14 +03:00
parent 069d63a65c
commit b8283ebfaf
277 changed files with 2184 additions and 4192 deletions

View File

@@ -38,7 +38,6 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/secgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/sep"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stpolicy"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/trunk"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vfpool"
@@ -66,7 +65,6 @@ import (
cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
cb_secgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/secgroup"
cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack"
cb_stpolicy "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stpolicy"
cb_trunk "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/trunk"
cb_user "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/user"
@@ -161,8 +159,6 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_lb_list_deleted": lb.DataSourceLBListDeleted(),
"decort_flipgroup": flipgroup.DataSourceFlipgroup(),
"decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(),
"decort_stack": stack.DataSourceStack(),
"decort_stack_list": stack.DataSourceStackList(),
"decort_vfpool": vfpool.DataSourceVFPool(),
"decort_vfpool_list": vfpool.DataSourceVFPoolList(),
"decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(),
@@ -218,7 +214,6 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_cb_grid_list_consumption": cb_grid.DataSourceGridListConsumption(),
"decort_cb_grid_get_consumption": cb_grid.DataSourceGridGetConsumption(),
"decort_cb_image_list": cb_image.DataSourceImageList(),
"decort_cb_image_list_stacks": cb_image.DataSourceImageListStacks(),
"decort_cb_kvmvm": cb_kvmvm.DataSourceCompute(),
"decort_cb_kvmvm_affinity_relations": cb_kvmvm.DataSourceComputeAffinityRelations(),
"decort_cb_kvmvm_audits": cb_kvmvm.DataSourceComputeAudits(),
@@ -277,8 +272,6 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_cb_flipgroup": cb_flipgroup.DataSourceFlipgroup(),
"decort_cb_security_group": cb_secgroup.DataSourceSecurityGroup(),
"decort_cb_security_group_list": cb_secgroup.DataSourceSecurityGroupList(),
"decort_cb_stack_list": cb_stack.DataSourceStacksList(),
"decort_cb_stack": cb_stack.DataSourceStack(),
"decort_cb_storage_policy": cb_stpolicy.DataSourceStoragePolicy(),
"decort_cb_storage_policy_list": cb_stpolicy.DataSourceStoragePolicyList(),
"decort_cb_trunk": cb_trunk.DataSourceTrunk(),

View File

@@ -41,7 +41,6 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
// TODO: resources (additional ds / additional request inside body (?))
func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
@@ -119,10 +118,6 @@ func resourceLimitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -171,91 +166,6 @@ func sepsSchemaMake() map[string]*schema.Schema {
return res
}
func resourcesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: sepsSchemaMake(),
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: sepsSchemaMake(),
},
},
},
},
},
}
return res
}
func computesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"started": {

View File

@@ -55,7 +55,6 @@ func dataSourceAccountConsumedUnitsRead(ctx context.Context, d *schema.ResourceD
d.Set("cu_dm", accountConsumedUnits.CUDM)
d.Set("cu_i", accountConsumedUnits.CUI)
d.Set("cu_m", accountConsumedUnits.CUM)
d.Set("cu_np", accountConsumedUnits.CUNP)
d.Set("gpu_units", accountConsumedUnits.GPUUnits)
return nil
@@ -88,10 +87,6 @@ func dataSourceAccountConsumedUnitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -108,10 +108,6 @@ func dataSourceResourceLimitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -55,7 +55,6 @@ func dataSourceAccountReservedUnitsRead(ctx context.Context, d *schema.ResourceD
d.Set("cu_dm", accountReservedUnits.CUDM)
d.Set("cu_i", accountReservedUnits.CUI)
d.Set("cu_m", accountReservedUnits.CUM)
d.Set("cu_np", accountReservedUnits.CUNP)
d.Set("gpu_units", accountReservedUnits.GPUUnits)
return nil
@@ -88,10 +87,6 @@ func dataSourceAccountReservedUnitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -131,10 +131,6 @@ func dataSourceAccResourceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -80,12 +80,11 @@ func flattenAccRGComputes(argc account.RGComputes) []map[string]interface{} {
func flattenAccResourceHack(r account.LimitsRG) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": r.CPU,
"disksize": r.DiskSize,
"extips": r.ExtIPs,
"exttraffic": r.ExtTraffic,
"gpu": r.GPU,
"ram": r.RAM,
"cpu": r.CPU,
"disksize": r.DiskSize,
"extips": r.ExtIPs,
"gpu": r.GPU,
"ram": r.RAM,
//"seps": flattenAccountSeps(r.SEPs),
}
res = append(res, temp)
@@ -95,12 +94,11 @@ func flattenAccResourceHack(r account.LimitsRG) []map[string]interface{} {
func flattenAccResourceRg(r account.Resource) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": r.CPU,
"disksize": r.DiskSize,
"extips": r.ExtIPs,
"exttraffic": r.ExtTraffic,
"gpu": r.GPU,
"ram": r.RAM,
"cpu": r.CPU,
"disksize": r.DiskSize,
"extips": r.ExtIPs,
"gpu": r.GPU,
"ram": r.RAM,
}
res = append(res, temp)
return res
@@ -223,10 +221,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -256,10 +250,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -288,10 +278,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -8,8 +8,6 @@ import (
func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
d.Set("dc_location", acc.DCLocation)
d.Set("desc", acc.Description)
//TODO
// d.Set("resources", flattenAccResources(acc.Resources))
d.Set("acl", flattenAccAcl(acc.ACL))
d.Set("company", acc.Company)
d.Set("companyurl", acc.CompanyURL)
@@ -52,8 +50,6 @@ func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
func flattenAccountResource(d *schema.ResourceData, acc account.RecordAccount) error {
d.Set("dc_location", acc.DCLocation)
d.Set("desc", acc.Description)
//TODO
// d.Set("resources", flattenAccResources(acc.Resources))
d.Set("acl", flattenAccAcl(acc.ACL))
d.Set("company", acc.Company)
d.Set("companyurl", acc.CompanyURL)
@@ -144,7 +140,6 @@ func flattenAccResourceLimits(rl account.ResourceLimits) []map[string]interface{
"cu_dm": rl.CUDM,
"cu_i": rl.CUI,
"cu_m": rl.CUM,
"cu_np": rl.CUNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenSTPolicy(rl.StoragePolicy),
}
@@ -165,16 +160,6 @@ func flattenSTPolicy(ast []account.StoragePolicyItem) []map[string]interface{} {
return res
}
// func flattenAccResources(r account.Resources) []map[string]interface{} {
// res := make([]map[string]interface{}, 0)
// temp := map[string]interface{}{
// "current": flattenAccResource(r.Current),
// "reserved": flattenAccResource(r.Reserved),
// }
// res = append(res, temp)
// return res
// }
func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for sepKey, sepVal := range seps {
@@ -198,7 +183,6 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
"disk_size": r.DiskSize,
"disk_size_max": r.DiskSizeMax,
"ext_ips": r.ExtIPs,
"ext_traffic": r.ExtTraffic,
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),

View File

@@ -297,15 +297,6 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
updated = true
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
updated = true
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {
@@ -548,11 +539,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Optional: true,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Optional: true,
@@ -585,13 +571,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
// "resources": {
// Type: schema.TypeList,
// Computed: true,
// Elem: &schema.Resource{
// Schema: resourcesSchemaMake(),
// },
// },
"acl": {
Type: schema.TypeList,
Computed: true,

View File

@@ -52,7 +52,7 @@ func utilityAuditCheckPresence(ctx context.Context, d *schema.ResourceData, m in
req.AuditGuid = d.Get("audit_guid").(string)
}
log.Debugf("utilityStackCheckPresence: load audit")
log.Debugf("utilityAuditCheckPresence: load audit")
auditInfo, err := c.CloudAPI().Audit().Get(ctx, req)
if err != nil {
return nil, err

View File

@@ -110,7 +110,7 @@ func dataSourceBasicServiceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},

View File

@@ -101,7 +101,7 @@ func flattenBasicServiceComputes(bscs bservice.ListComputes) []map[string]interf
"id": bsc.ID,
"name": bsc.Name,
"rg_id": bsc.RGID,
"stack_id": bsc.StackID,
"node_id": bsc.NodeID,
"status": bsc.Status,
"tech_status": bsc.TechStatus,
}

View File

@@ -576,7 +576,7 @@ func resourceBasicServiceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},

View File

@@ -58,7 +58,6 @@ func resourceBasicServiceGroupCreate(ctx context.Context, d *schema.ResourceData
RAM: uint64(d.Get("ram").(int)),
Disk: uint64(d.Get("disk").(int)),
ImageID: uint64(d.Get("image_id").(int)),
Driver: d.Get("driver").(string),
StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
}
@@ -424,13 +423,8 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"i440fx", "Q35"}, false),
Default: "i440fx",
},
"driver": {
Type: schema.TypeString,
Optional: true,
Description: "compute driver like a KVM_X86, etc.",
Default: "KVM_X86",
Default: "Q35",
Description: "Chipset for virtual machines.",
},
///
"role": {
@@ -579,6 +573,10 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"driver": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -80,6 +80,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
@@ -382,6 +386,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -147,6 +147,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
@@ -456,6 +460,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -138,11 +138,6 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
@@ -165,6 +160,10 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -391,6 +390,11 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "shareable",
},
"cache": {
Type: schema.TypeString,
Computed: true,
Description: "Cache mode for the disk",
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -128,6 +128,10 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
@@ -437,6 +441,10 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -21,11 +21,11 @@ func flattenDiskListUnattached(ul *disks.ListDisksUnattached) []map[string]inter
for _, unattachedDisk := range ul.Data {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.CKey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"blk_discard": unattachedDisk.BLKDiscard,
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
@@ -57,6 +57,7 @@ func flattenDiskListUnattached(ul *disks.ListDisksUnattached) []map[string]inter
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SEPID,
"shareable": unattachedDisk.Shareable,
"cache": unattachedDisk.Cache,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),
@@ -75,6 +76,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("account_id", disk.AccountID)
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("blk_discard", disk.BLKDiscard)
// d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_by", disk.CreatedBy)
@@ -95,7 +97,6 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
// d.Set("login", disk.Login)
d.Set("machine_id", disk.MachineID)
d.Set("machine_name", disk.MachineName)
d.Set("milestones", disk.Milestones)
d.Set("disk_name", disk.Name)
d.Set("order", disk.Order)
d.Set("params", disk.Params)
@@ -114,10 +115,10 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("role", disk.Role)
d.Set("sep_id", disk.SepID)
d.Set("sep_type", disk.SepType)
d.Set("size_available", disk.SizeAvailable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("storage_policy_id", disk.StoragePolicyID)
@@ -186,6 +187,7 @@ func flattenDiskReplica(d *schema.ResourceData, disk *disks.RecordDisk, statusRe
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("status_replication", statusReplication)
@@ -219,6 +221,7 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"blk_discard": disk.BLKDiscard,
"computes": flattenDiskComputes(disk.Computes),
"created_by": disk.CreatedBy,
"created_time": disk.CreatedTime,
@@ -250,6 +253,7 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"sep_id": disk.SepID,
"sep_type": disk.SepType,
"shareable": disk.Shareable,
"cache": disk.Cache,
"size_available": disk.SizeAvailable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,

View File

@@ -533,6 +533,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,

View File

@@ -133,7 +133,7 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label %s", label)
_, err := c.CloudAPI().Disks().SnapshotRollback(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -145,14 +145,6 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,

View File

@@ -90,11 +90,6 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by IP-address",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,

View File

@@ -61,8 +61,6 @@ func flattenFlipgroup(d *schema.ResourceData, fg *flipgroup.RecordFLIPGroup) {
d.Set("net_id", fg.NetID)
d.Set("net_type", fg.NetType)
d.Set("network", fg.Network)
d.Set("rg_id", fg.RGID)
d.Set("rg_name", fg.RGName)
d.Set("status", fg.Status)
d.Set("updated_by", fg.UpdatedBy)
d.Set("updated_time", fg.UpdatedTime)

View File

@@ -265,14 +265,6 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,

View File

@@ -165,9 +165,6 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
if byIp, ok := d.GetOk("by_ip"); ok {
req.ByIP = byIp.(string)
}
if rgId, ok := d.GetOk("rg_id"); ok {
req.RGID = uint64(rgId.(int))
}
if byId, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byId.(int))
}

View File

@@ -26,7 +26,6 @@ func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
cdPresentedTo, _ := json.Marshal(img.CdPresentedTo)
d.Set("unc_path", img.UNCPath)
d.Set("ckey", img.CKey)
d.Set("account_id", img.AccountID)
d.Set("acl", FlattenACL(img.ACL))
d.Set("architecture", img.Architecture)

View File

@@ -49,10 +49,6 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -805,11 +805,13 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"desc": {
Type: schema.TypeString,

View File

@@ -715,11 +715,13 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"disk": {
Type: schema.TypeInt,
Optional: true,

View File

@@ -290,11 +290,13 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Description: "Number of worker nodes to create.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"cpu": {
Type: schema.TypeInt,

View File

@@ -256,6 +256,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -264,6 +268,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -1035,6 +1043,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is in read-only mode.",
},
"userdata": {
Type: schema.TypeString,
Computed: true,
@@ -1128,14 +1141,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_name": {
Type: schema.TypeString,
Computed: true,
},
"stateless_sep_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -76,6 +76,10 @@ func computeDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func itemComputeSchemaMake() map[string]*schema.Schema {
@@ -226,10 +230,6 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
@@ -409,6 +409,11 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is in read-only mode.",
},
"vgpus": {
Type: schema.TypeList,
Computed: true,
@@ -420,10 +425,6 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"loader_type": {
Type: schema.TypeString,
Computed: true,

View File

@@ -128,7 +128,7 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},

View File

@@ -50,6 +50,7 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
// "bus_number": disk.BusNumber,
"disk_id": disk.ID,
// "pci_slot": disk.PCISlot,
"sep_id": disk.SepID,
}
res = append(res, temp)
}
@@ -179,76 +180,73 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
customFields, _ := json.Marshal(compute.CustomFields)
devices, _ := json.Marshal(compute.Devices)
temp := map[string]interface{}{
"acl": flattenListACL(compute.ACL),
"account_id": compute.AccountID,
"account_name": compute.AccountName,
"affinity_label": compute.AffinityLabel,
"affinity_rules": flattenListRules(compute.AffinityRules),
"affinity_weight": compute.AffinityWeight,
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
"arch": compute.Architecture,
"auto_start_w_node": compute.AutoStart,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"boot_image_id": compute.BootImageID,
"chipset": compute.Chipset,
"cd_image_id": compute.CdImageId,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,
"computeci_id": compute.ComputeCIID,
"cpu_pin": compute.CPUPin,
"cpus": compute.CPU,
"created_by": compute.CreatedBy,
"created_time": compute.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": compute.DeletedBy,
"deleted_time": compute.DeletedTime,
"desc": compute.Description,
"devices": string(devices),
"disks": flattenDisks(compute.Disks),
"driver": compute.Driver,
"gid": compute.GID,
"guid": compute.GUID,
"hp_backed": compute.HPBacked,
"compute_id": compute.ID,
//TODO
// "image_id": compute.ImageID,
"interfaces": flattenInterfaces(compute.Interfaces),
"live_migration_job_id": compute.LiveMigrationJobID,
"lock_status": compute.LockStatus,
"manager_id": compute.ManagerID,
"manager_type": compute.ManagerType,
"migrationjob": compute.MigrationJob,
"milestones": compute.Milestones,
"name": compute.Name,
"need_reboot": compute.NeedReboot,
"numa_affinity": compute.NumaAffinity,
"numa_node_id": compute.NumaNodeId,
"os_version": compute.OSVersion,
"pinned": compute.PinnedToStack,
"preferred_cpu": compute.PreferredCPU,
"qemu_guest": flattenQemuQuest(compute.QemuQuest),
"ram": compute.RAM,
"reference_id": compute.ReferenceID,
"registered": compute.Registered,
"res_name": compute.ResName,
"reserved_node_cpus": compute.ReservedNodeCpus,
"rg_id": compute.RGID,
"rg_name": compute.RGName,
"snap_sets": flattenSnapSets(compute.SnapSets),
"stateless_sep_id": compute.StatelessSepID,
"stateless_sep_type": compute.StatelessSepType,
"status": compute.Status,
"tags": flattenTags(compute.Tags),
"tech_status": compute.TechStatus,
"total_disk_size": compute.TotalDiskSize,
"updated_by": compute.UpdatedBy,
"updated_time": compute.UpdatedTime,
"user_managed": compute.UserManaged,
"vgpus": compute.VGPUs,
"vins_connected": compute.VINSConnected,
//TODO
// "virtual_image_id": compute.VirtualImageID,
"acl": flattenListACL(compute.ACL),
"account_id": compute.AccountID,
"account_name": compute.AccountName,
"affinity_label": compute.AffinityLabel,
"affinity_rules": flattenListRules(compute.AffinityRules),
"affinity_weight": compute.AffinityWeight,
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
"arch": compute.Architecture,
"auto_start_w_node": compute.AutoStart,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"boot_image_id": compute.BootImageID,
"chipset": compute.Chipset,
"cd_image_id": compute.CdImageId,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,
"computeci_id": compute.ComputeCIID,
"cpu_pin": compute.CPUPin,
"cpus": compute.CPU,
"created_by": compute.CreatedBy,
"created_time": compute.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": compute.DeletedBy,
"deleted_time": compute.DeletedTime,
"desc": compute.Description,
"devices": string(devices),
"disks": flattenDisks(compute.Disks),
"driver": compute.Driver,
"gid": compute.GID,
"guid": compute.GUID,
"hp_backed": compute.HPBacked,
"compute_id": compute.ID,
"interfaces": flattenInterfaces(compute.Interfaces),
"live_migration_job_id": compute.LiveMigrationJobID,
"lock_status": compute.LockStatus,
"manager_id": compute.ManagerID,
"manager_type": compute.ManagerType,
"migrationjob": compute.MigrationJob,
"milestones": compute.Milestones,
"name": compute.Name,
"need_reboot": compute.NeedReboot,
"numa_affinity": compute.NumaAffinity,
"numa_node_id": compute.NumaNodeId,
"os_version": compute.OSVersion,
"pinned": compute.PinnedToNode,
"preferred_cpu": compute.PreferredCPU,
"qemu_guest": flattenQemuQuest(compute.QemuQuest),
"ram": compute.RAM,
"reference_id": compute.ReferenceID,
"registered": compute.Registered,
"res_name": compute.ResName,
"reserved_node_cpus": compute.ReservedNodeCpus,
"rg_id": compute.RGID,
"rg_name": compute.RGName,
"snap_sets": flattenSnapSets(compute.SnapSets),
"stateless_sep_id": compute.StatelessSepID,
"stateless_sep_type": compute.StatelessSepType,
"status": compute.Status,
"tags": flattenTags(compute.Tags),
"tech_status": compute.TechStatus,
"total_disk_size": compute.TotalDiskSize,
"updated_by": compute.UpdatedBy,
"updated_time": compute.UpdatedTime,
"user_managed": compute.UserManaged,
"read_only": compute.ReadOnly,
"vgpus": compute.VGPUs,
"vins_connected": compute.VINSConnected,
"loader_type": compute.LoaderType,
"boot_type": compute.BootType,
"hot_resize": compute.HotResize,
@@ -279,6 +277,8 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
"present_to": bootDisk.PresentTo,
"storage_policy_id": bootDisk.StoragePolicyID,
"to_clean": bootDisk.ToClean,
"cache": bootDisk.Cache,
"blk_discard": bootDisk.BLKDiscard,
}
res = append(res, temp)
@@ -333,6 +333,8 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, disksBlocks, ex
"deleted_time": disk.DeletedTime,
"updated_time": disk.UpdatedTime,
"permanently": pernamentlyValue,
"cache": disk.Cache,
"blk_discard": disk.BLKDiscard,
}
res = append(res, temp)
indexDataDisks++
@@ -351,6 +353,7 @@ func flattenNetwork(networks []interface{}, interfaces compute.ListInterfaces) [
"ip_address": network.IPAddress,
"mac": network.MAC,
"mtu": network.MTU,
"net_mask": network.NetMask,
"sdn_interface_id": network.SDNInterfaceID,
"weight": flattenNetworkWeight(networks, network.NetID, network.NetType),
"enabled": network.Enabled,
@@ -428,12 +431,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
d.Set("gid", computeRec.GID)
d.Set("guid", computeRec.GUID)
d.Set("compute_id", computeRec.ID)
//TODO
// if computeRec.VirtualImageID != 0 {
// d.Set("image_id", computeRec.VirtualImageID)
// } else {
// d.Set("image_id", computeRec.ImageID)
// }
d.Set("image_id", computeRec.ImageID)
d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
d.Set("lock_status", computeRec.LockStatus)
d.Set("manager_id", computeRec.ManagerID)
@@ -451,7 +449,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
if err := d.Set("os_users", parseOsUsers(computeRec.OSUsers)); err != nil {
return err
}
d.Set("pinned", computeRec.PinnedToStack)
d.Set("pinned", computeRec.PinnedToNode)
d.Set("preferred_cpu", computeRec.PreferredCPU)
d.Set("ram", computeRec.RAM)
d.Set("reference_id", computeRec.ReferenceID)
@@ -469,11 +467,9 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_managed", computeRec.UserManaged)
d.Set("read_only", computeRec.ReadOnly)
d.Set("vnc_password", computeRec.VNCPassword)
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
//TODO
// d.Set("virtual_image_id", computeRec.VirtualImageID)
// d.Set("virtual_image_name", computeRec.VirtualImageName)
d.Set("loader_type", computeRec.LoaderType)
d.Set("boot_type", computeRec.BootType)
d.Set("hot_resize", computeRec.HotResize)
@@ -585,9 +581,11 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
"_ckey": disk.CKey,
"acl": string(acl),
"account_id": disk.AccountID,
"blk_discard": disk.BLKDiscard,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"cache": disk.Cache,
"created_by": disk.CreatedBy,
"deleted_time": disk.DeletedTime,
"deleted_by": disk.DeletedBy,
@@ -722,7 +720,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("natable_vins_network", computeRec.NatableVINSNetwork)
d.Set("natable_vins_network_name", computeRec.NatableVINSNetworkName)
d.Set("os_users", flattenOsUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.PinnedToStack)
d.Set("pinned", computeRec.PinnedToNode)
d.Set("preferred_CPU", computeRec.PreferredCPU)
d.Set("qemu_guest", flattenQemuQuest(computeRec.QemuQuest))
d.Set("ram", computeRec.RAM)
@@ -741,12 +739,10 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_managed", computeRec.UserManaged)
d.Set("read_only", computeRec.ReadOnly)
d.Set("userdata", string(userdata))
d.Set("vnc_password", computeRec.VNCPassword)
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
//TODO
// d.Set("virtual_image_id", computeRec.VirtualImageID)
// d.Set("virtual_image_name", computeRec.VirtualImageName)
d.Set("pci_devices", flattenPCI(*pciList))
d.Set("loader_type", computeRec.LoaderType)
d.Set("boot_type", computeRec.BootType)
@@ -884,7 +880,7 @@ func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface
"device_id": dev.ID,
"name": dev.Name,
"rg_id": dev.RGID,
"stack_id": dev.StackID,
"node_id": dev.NodeID,
"status": dev.Status,
"system_name": dev.SystemName,
}

View File

@@ -184,6 +184,13 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "network enable flag",
},
"net_mask": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Subnet mask, used only for DPDK and VFNIC network types",
},
}
return rets
}

View File

@@ -182,6 +182,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
reqInterface.MTU = uint64(netInterfaceVal["mtu"].(int))
}
if reqInterface.NetType == "DPDK" || reqInterface.NetType == "VFNIC" {
if netMask, netMaskSet := netInterfaceVal["net_mask"]; netMaskSet {
reqInterface.NetMask = uint64(netMask.(int))
}
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
@@ -460,16 +466,16 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.Get("pin_to_stack").(bool) {
if d.Get("pin_to_node").(bool) {
if !d.Get("started").(bool) {
warnings.Add(errors.New("cannot pin to stack a VM, VM should be started"))
warnings.Add(errors.New("cannot pin to node a VM, VM should be started"))
}
if d.Get("started").(bool) {
req := compute.PinToStackRequest{
req := compute.PinToNodeRequest{
ComputeID: computeId,
}
req.AutoStart = d.Get("auto_start_w_node").(bool)
_, err = c.CloudAPI().Compute().PinToStack(ctx, req)
_, err = c.CloudAPI().Compute().PinToNode(ctx, req)
if err != nil {
warnings.Add(err)
}
@@ -637,7 +643,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if !d.Get("pin_to_stack").(bool) && d.Get("auto_start_w_node").(bool) {
if !d.Get("pin_to_node").(bool) && d.Get("auto_start_w_node").(bool) {
req := compute.UpdateRequest{
ComputeID: computeId,
AutoStart: d.Get("auto_start_w_node").(bool),
@@ -1775,6 +1781,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
}
} else {
req := compute.StopRequest{
ComputeID: computeRec.ID,
@@ -1785,28 +1792,28 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("pin_to_stack") || d.HasChange("started") {
newPin := d.Get("pin_to_stack")
if !newPin.(bool) && d.HasChange("pin_to_stack") {
req := compute.UnpinFromStackRequest{
if d.HasChange("pin_to_node") || d.HasChange("started") {
newPin := d.Get("pin_to_node")
if !newPin.(bool) && d.HasChange("pin_to_node") {
req := compute.UnpinFromNodeRequest{
ComputeID: computeRec.ID,
}
_, err := c.CloudAPI().Compute().UnpinFromStack(ctx, req)
_, err := c.CloudAPI().Compute().UnpinFromNode(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if newPin.(bool) {
if !d.Get("started").(bool) {
return diag.Errorf("Cannot pin to stack a VM, that is not started")
return diag.Errorf("Cannot pin to node a VM, that is not started")
}
req := compute.PinToStackRequest{
req := compute.PinToNodeRequest{
ComputeID: computeRec.ID,
}
req.AutoStart = d.Get("auto_start_w_node").(bool)
_, err = c.CloudAPI().Compute().PinToStack(ctx, req)
_, err = c.CloudAPI().Compute().PinToNode(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@@ -1992,6 +1999,10 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
@@ -2012,6 +2023,10 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
}
return rets
}
@@ -2199,7 +2214,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -2235,7 +2250,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -2345,7 +2360,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Schema: cdSubresourceSchemaMake(),
},
},
"pin_to_stack": {
"pin_to_node": {
Type: schema.TypeBool,
Optional: true,
Default: false,
@@ -2735,6 +2750,11 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is in read-only mode.",
},
"vnc_password": {
Type: schema.TypeString,
Computed: true,
@@ -2866,12 +2886,12 @@ func ResourceCompute() *schema.Resource {
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "network", "affinity_rules", "anti_affinity_rules",
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_node", "auto_start_w_node", "network", "affinity_rules", "anti_affinity_rules",
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu", "security_groups") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}
if diff.HasChanges("pin_to_stack") {
if diff.HasChanges("pin_to_node") {
diff.SetNewComputed("pinned")
}
if diff.HasChanges("image_id") {

View File

@@ -281,6 +281,12 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
req.MTU = uint64(netData["mtu"].(int))
}
if req.NetType == "DPDK" || req.NetType == "VFNIC" {
if netMask, ok := netData["net_mask"].(int); ok && netMask > 0 {
req.NetMask = uint64(netMask)
}
}
_, err := c.CloudAPI().Compute().NetAttach(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s",

View File

@@ -60,7 +60,8 @@ func lbResourceSchemaMake() map[string]*schema.Schema {
}
sch["start"] = &schema.Schema{
Type: schema.TypeBool,
Required: true,
Optional: true,
Default: true,
}
sch["desc"] = &schema.Schema{
Type: schema.TypeString,

View File

@@ -82,7 +82,10 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
RGID: uint64(d.Get("rg_id").(int)),
ExtNetID: uint64(d.Get("extnet_id").(int)),
VINSID: uint64(d.Get("vins_id").(int)),
Start: d.Get("start").(bool),
}
if start, ok := d.GetOk("start"); ok {
req.Start = start.(bool)
}
if desc, ok := d.GetOk("desc"); ok {

View File

@@ -212,8 +212,6 @@ func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData,
return diag.FromErr(err)
}
//TODO: перенести servers сюда
return resourceLBBackendServerRead(ctx, d, m)
}

View File

@@ -114,10 +114,6 @@ func DataSourceResgroup() *schema.Resource {
// Type: schema.TypeInt,
// Computed: true,
// },
// "exttraffic": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "gpu": {
// Type: schema.TypeInt,
// Computed: true,
@@ -169,10 +165,6 @@ func DataSourceResgroup() *schema.Resource {
// Type: schema.TypeInt,
// Computed: true,
// },
// "exttraffic": {
// Type: schema.TypeInt,
// Computed: true,
// },
// "gpu": {
// Type: schema.TypeInt,
// Computed: true,
@@ -261,10 +253,6 @@ func resourceLimitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -138,10 +138,6 @@ func dataSourceRGResourceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -183,10 +179,6 @@ func dataSourceResourceLimitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -76,10 +76,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -63,7 +63,6 @@ func flattenRGSeps(seps map[string]map[string]rg.DiskUsage) []map[string]interfa
// "cpu": r.CPU,
// "disksize": r.DiskSize,
// "extips": r.ExtIPs,
// "exttraffic": r.ExtTraffic,
// "gpu": r.GPU,
// "ram": r.RAM,
// "seps": flattenRgSeps(r.SEPs),
@@ -143,13 +142,12 @@ func flattenRgSeps(seps map[string]map[string]rg.DiskUsage) []map[string]interfa
func flattenQuota(resource rg.ResourceLimits) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": resource.CUC,
"ram": resource.CUM,
"disk": resource.CUDM,
"ext_ips": resource.CUI,
"ext_traffic": resource.CUNP,
"gpu_units": resource.GPUUnits,
"cu_d": resource.CUD,
"cpu": resource.CUC,
"ram": resource.CUM,
"disk": resource.CUDM,
"ext_ips": resource.CUI,
"gpu_units": resource.GPUUnits,
"cu_d": resource.CUD,
}
res = append(res, temp)
@@ -164,7 +162,6 @@ func flattenResource(resource rg.Resource) []map[string]interface{} {
"disk_size": resource.DiskSize,
"disk_size_max": resource.DiskSizeMax,
"extips": resource.ExtIPs,
"exttraffic": resource.ExtTraffic,
"gpu": resource.GPU,
"ram": resource.RAM,
"seps": flattenRGSeps(resource.SEPs),
@@ -292,7 +289,6 @@ func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} {
"cu_i": rl.CUI,
"cu_m": rl.CUM,
"cu_dm": rl.CUDM,
"cu_np": rl.CUNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenRgStoragePolicy(rl.StoragePolicies),
}
@@ -609,7 +605,6 @@ func flattenRgUsageResource(d *schema.ResourceData, usage rg.RecordResourceUsage
d.Set("disk_size", usage.DiskSize)
d.Set("disk_size_max", usage.DiskSizeMax)
d.Set("extips", usage.ExtIPs)
d.Set("exttraffic", usage.ExtraTraffic)
d.Set("gpu", usage.GPU)
d.Set("ram", usage.RAM)
d.Set("seps", flattenRgSeps(usage.SEPs))

View File

@@ -37,17 +37,15 @@ type ResourceLimits struct {
CUD float64 `json:"CU_D"`
CUI float64 `json:"CU_I"`
CUM float64 `json:"CU_M"`
CUNP float64 `json:"CU_NP"`
GpuUnits float64 `json:"gpu_units"`
}
type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get
Cpu int `json:"CU_C"` // CPU count in pcs
Ram int `json:"CU_M"` // RAM volume in MB
Disk int `json:"CU_D"` // Disk capacity in GB
ExtIPs int `json:"CU_I"` // Ext IPs count
ExtTraffic int `json:"CU_NP"` // Ext network traffic
GpuUnits int `json:"gpu_units"` // GPU count
Cpu int `json:"CU_C"` // CPU count in pcs
Ram int `json:"CU_M"` // RAM volume in MB
Disk int `json:"CU_D"` // Disk capacity in GB
ExtIPs int `json:"CU_I"` // Ext IPs count
GpuUnits int `json:"gpu_units"` // GPU count
}
// Main information about audit
@@ -85,9 +83,6 @@ type Resource struct {
// Number of External IPs
ExtIPs int64 `json:"extips"`
// External traffic
ExtTraffic int64 `json:"exttraffic"`
// Number of grafic cores
GPU int64 `json:"gpu"`

View File

@@ -34,12 +34,11 @@ package rg
func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
quota := QuotaRecord{
Cpu: -1,
Ram: -1,
Disk: -1,
ExtTraffic: -1,
ExtIPs: -1,
GpuUnits: -1,
Cpu: -1,
Ram: -1,
Disk: -1,
ExtIPs: -1,
GpuUnits: -1,
}
if len(arg_list) != 0 {
subres_data := arg_list[0].(map[string]interface{})
@@ -56,10 +55,6 @@ func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
quota.Ram = subres_data["ram"].(int) // RAM volume in MB
}
if subres_data["ext_traffic"].(int) > 0 {
quota.ExtTraffic = subres_data["ext_traffic"].(int)
}
if subres_data["ext_ips"].(int) > 0 {
quota.ExtIPs = subres_data["ext_ips"].(int)
}
@@ -77,7 +72,6 @@ func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
// quota_map["cpu"] = quota.Cpu
// quota_map["ram"] = quota.Ram // NB: this is float64, unlike the rest of values
// quota_map["disk"] = quota.Disk
// quota_map["ext_traffic"] = quota.ExtTraffic
// quota_map["ext_ips"] = quota.ExtIPs
// quota_map["gpu_units"] = quota.GpuUnits

View File

@@ -127,14 +127,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.MaxNumPublicIP = maxNumPublicIP
}
}
if quotaMap["ext_traffic"] != nil {
maxNP := int64(quotaMap["ext_traffic"].(int))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
}
if defNetType, ok := d.GetOk("def_net_type"); ok {
req.DefNet = defNetType.(string) // NOTE: in API default network type is set by "def_net" parameter
@@ -640,12 +632,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Limit on the total volume of virtual storage resources in this resource group, specified in GB.",
},
"ext_traffic": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Limit on the total ingress network traffic for this resource group, specified in GB.",
},
"ext_ips": {
Type: schema.TypeInt,
Optional: true,
@@ -913,10 +899,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -11,12 +11,11 @@ func resourceRGStateUpgradeV1(ctx context.Context, rawState map[string]interface
quota, ok := rawState["quota"]
if !ok || quota == nil {
rawState["quota"] = QuotaRecord{
Cpu: -1,
Ram: -1,
Disk: -1,
ExtTraffic: -1,
ExtIPs: -1,
GpuUnits: -1,
Cpu: -1,
Ram: -1,
Disk: -1,
ExtIPs: -1,
GpuUnits: -1,
}
return rawState, nil
}

View File

@@ -113,14 +113,6 @@ func utilityUpdateRG(ctx context.Context, d *schema.ResourceData, m interface{},
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["ext_traffic"] != nil {
maxNP := int64(resLimitsConv["ext_traffic"].(int))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
}
}

View File

@@ -1,110 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"strconv"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func dataSourceStackRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
stack, err := utilityStackCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("stack_id").(int)))
flattenStack(d, stack)
return nil
}
func dataSourceStackSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"stack_id": {
Type: schema.TypeInt,
Required: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"descr": {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
}
return res
}
func DataSourceStack() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceStackRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceStackSchemaMake(),
}
}

View File

@@ -1,142 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceStackListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
stackList, err := utilityStackListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenStackList(stackList))
d.Set("entry_count", stackList.EntryCount)
return nil
}
func dataSourceStackListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "Find by type",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"stack_id": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceStackList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceStackListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceStackListSchemaMake(),
}
}

View File

@@ -1,69 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stack"
)
func flattenStackList(stackl *stack.ListStacks) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, stack := range stackl.Data {
temp := map[string]interface{}{
"stack_id": stack.ID,
"name": stack.Name,
"status": stack.Status,
"type": stack.Type,
}
res = append(res, temp)
}
return res
}
func flattenStack(d *schema.ResourceData, details *stack.InfoStack) error {
log.Debugf("flattenStack: decoded Stack name %q / ID %d",
details.Name, details.ID)
d.Set("stack_id", details.ID)
d.Set("cpu_allocation_ratio", details.CPUAllocationRatio)
d.Set("name", details.Name)
d.Set("descr", details.Descr)
d.Set("mem_allocation_ratio", details.MemAllocationRatio)
d.Set("status", details.Status)
d.Set("type", details.Type)
d.Set("drivers", details.Drivers)
return nil
}

View File

@@ -1,62 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"strconv"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityStackCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.InfoStack, error) {
c := m.(*controller.ControllerCfg)
req := stack.GetRequest{}
if d.Id() != "" {
stackId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.StackId = stackId
} else {
req.StackId = uint64(d.Get("stack_id").(int))
}
stackData, err := c.CloudAPI().Stack().Get(ctx, req)
if err != nil {
return nil, err
}
return stackData, nil
}

View File

@@ -1,82 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityStackListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.ListStacks, error) {
c := m.(*controller.ControllerCfg)
req := stack.ListRequest{}
if byId, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byId.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if stackType, ok := d.GetOk("type"); ok {
req.Type = stackType.(string)
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
log.Debugf("utilityStackListCheckPresence: load stack list")
stackList, err := c.CloudAPI().Stack().List(ctx, req)
if err != nil {
return nil, err
}
return stackList, nil
}

View File

@@ -20,9 +20,10 @@ func flattenAccessSEPPools(accessSEPPools stpolicy.ListAccessSEPPools) []map[str
res := make([]map[string]interface{}, 0, len(accessSEPPools))
for _, asp := range accessSEPPools {
temp := map[string]interface{}{
"sep_id": asp.SEPID,
"sep_name": asp.Name,
"pool_names": asp.PoolNames,
"sep_id": asp.SEPID,
"sep_name": asp.Name,
"pool_names": asp.PoolNames,
"sep_tech_status": asp.SepTechStatus,
}
res = append(res, temp)

View File

@@ -1,6 +1,9 @@
package stpolicy
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func dataSourceStoragePolicySchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
@@ -44,6 +47,10 @@ func dataSourceStoragePolicySchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
@@ -120,6 +127,11 @@ func dataSourceStoragePolicyListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Optional: true,
},
"sep_tech_status": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"ENABLED", "DISABLED"}, true),
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
@@ -174,6 +186,10 @@ func dataSourceStoragePolicyListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"sep_tech_status": {
Type: schema.TypeString,
Computed: true,
},
},
},
},

View File

@@ -56,6 +56,10 @@ func utilityStoragePolicyListCheckPresence(ctx context.Context, d *schema.Resour
req.SepID = uint64(SEPID.(int))
}
if SEPtechstatus, ok := d.GetOk("sep_tech_status"); ok {
req.SepTechStatus = SEPtechstatus.(string)
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}

View File

@@ -18,6 +18,7 @@ func flattenTrunk(d *schema.ResourceData, trunkItem *trunk.ItemTrunk) {
d.Set("account_ids", trunkItem.AccountIDs)
d.Set("ovs_bridge", trunkItem.OVSBridge)
d.Set("native_vlan_id", trunkItem.NativeVLANID)
d.Set("mtu", trunkItem.MTU)
d.Set("status", trunkItem.Status)
d.Set("trunk_tags", trunkItem.TrunkTags)
d.Set("created_at", trunkItem.CreatedAt)
@@ -44,6 +45,7 @@ func flattenTrunkList(trunkList *trunk.ListTrunks) []map[string]interface{} {
"name": trunkItem.Name,
"native_vlan_id": trunkItem.NativeVLANID,
"ovs_bridge": trunkItem.OVSBridge,
"mtu": trunkItem.MTU,
"status": trunkItem.Status,
"trunk_tags": trunkItem.TrunkTags,
"updated_at": trunkItem.UpdatedAt,

View File

@@ -52,6 +52,11 @@ func dataSourceTrunkSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Native VLAN ID",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum Transmission Unit",
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -208,6 +213,11 @@ func dataSourceTrunkListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "OVS bridge name",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum Transmission Unit",
},
"status": {
Type: schema.TypeString,
Computed: true,

View File

@@ -84,7 +84,7 @@ func vnfConfigResourcesSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},

View File

@@ -52,10 +52,10 @@ func flattenMGMT(mgmt vins.RecordMGMT) []map[string]interface{} {
func flattenResources(resources vins.RecordResources) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": resources.CPU,
"ram": resources.RAM,
"stack_id": resources.StackID,
"uuid": resources.UUID,
"cpu": resources.CPU,
"ram": resources.RAM,
"node_id": resources.NodeID,
"uuid": resources.UUID,
}
res = append(res, temp)
return res

View File

@@ -58,6 +58,10 @@ func dataSourceZoneSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"auto_start": {
Type: schema.TypeBool,
Required: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -115,6 +115,10 @@ func dataSourceZoneListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -59,6 +59,7 @@ func flattenZone(d *schema.ResourceData, item *zone.RecordZone) error {
d.Set("lb_ids", item.LBIDs)
d.Set("bservice_ids", item.BserviceIDs)
d.Set("k8s_ids", item.K8SIDs)
d.Set("auto_start", item.AutoStart)
log.Debugf("flattenZone: decoded RecordZone name %q / ID %d, complete",
item.Name, item.ID)
@@ -80,6 +81,7 @@ func flattenZoneList(zone *zone.ListZones) []map[string]interface{} {
"created_time": zone.CreatedTime,
"updated_time": zone.UpdatedTime,
"node_ids": zone.NodeIDs,
"auto_start": zone.AutoStart,
}
res = append(res, temp)
}

View File

@@ -120,7 +120,6 @@ func flattenAccLimits(l account.Limits) []map[string]interface{} {
"disksize": l.DiskSize,
"disksizemax": l.DiskSizeMax,
"extips": l.ExtIPs,
"exttraffic": l.ExtTraffic,
"gpu": l.GPU,
"ram": l.RAM,
"seps": l.SEPs,
@@ -136,7 +135,6 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
"disksize": r.DiskSize,
"disksizemax": r.DiskSizeMax,
"extips": r.ExtIPs,
"exttraffic": r.ExtTraffic,
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
@@ -190,7 +188,6 @@ func flattenRgResourceLimits(rl account.ResourceLimits) []map[string]interface{}
"cu_dm": rl.CuDM,
"cu_i": rl.CuI,
"cu_m": rl.CuM,
"cu_np": rl.CuNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenSTPolicy(rl.StoragePolicies),
}

View File

@@ -126,14 +126,6 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
if resLimitsConv["gpu_units"] != nil {
gpuUnits := int64(resLimitsConv["gpu_units"].(float64))
if gpuUnits == 0 {

View File

@@ -138,11 +138,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Optional: true,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Optional: true,
@@ -665,10 +660,6 @@ func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1024,10 +1015,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1128,10 +1115,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1236,10 +1219,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1427,10 +1406,6 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1548,10 +1523,6 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1652,10 +1623,6 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1842,10 +1809,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1902,10 +1865,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1942,10 +1901,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -2331,10 +2286,6 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -271,14 +271,6 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {

View File

@@ -50,11 +50,13 @@ func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
d.Set("responsetime", au.ResponseTime)
d.Set("result", au.Result)
d.Set("status_code", au.StatusCode)
d.Set("tags", au.Tags)
d.Set("timestamp", au.Timestamp)
d.Set("timestamp_end", au.TimestampEnd)
d.Set("ttl", au.TTL)
d.Set("user", au.User)
d.Set("resgroup_id", au.ResgroupID)
d.Set("account_id", au.AccountID)
d.Set("compute_id", au.ComputeID)
}
func flattenAuditList(au *audit.ListAudits) []map[string]interface{} {

View File

@@ -46,11 +46,6 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
//TODO
//"tags": {
// Type: schema.TypeString,
// Computed: true,
//},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
@@ -67,6 +62,18 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"resgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
}
}

View File

@@ -52,7 +52,7 @@ func utilityAuditCheckPresence(ctx context.Context, d *schema.ResourceData, m in
req.AuditGuid = d.Get("audit_guid").(string)
}
log.Debugf("utilityStackCheckPresence: load stack")
log.Debugf("utilityAuditCheckPresence: load audit")
auditInfo, err := c.CloudBroker().Audit().Get(ctx, req)
if err != nil {
return nil, err

View File

@@ -13,6 +13,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("account_id", disk.AccountID)
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("blk_discard", disk.BLKDiscard)
d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_by", disk.CreatedBy)
@@ -53,6 +54,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("sep_id", disk.SEPID)
d.Set("sep_type", disk.SEPType)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("size_available", disk.SizeAvailable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
@@ -108,6 +110,7 @@ func flattenDiskReplica(d *schema.ResourceData, disk *disks.RecordDisk, statusRe
d.Set("sep_id", disk.SEPID)
d.Set("sep_type", disk.SEPType)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
@@ -183,6 +186,7 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"blk_discard": disk.BLKDiscard,
"boot_partition": disk.BootPartition,
"computes": flattenDiskComputes(disk.Computes),
"created_by": disk.CreatedBy,
@@ -221,6 +225,8 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"role": disk.Role,
"sep_id": disk.SEPID,
"sep_type": disk.SEPType,
"shareable": disk.Shareable,
"cache": disk.Cache,
"size_available": disk.SizeAvailable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
@@ -293,11 +299,11 @@ func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]inter
for _, unattachedDisk := range ul.Data {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.CKey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"blk_discard": unattachedDisk.BLKDiscard,
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
@@ -330,6 +336,7 @@ func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]inter
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SEPID,
"shareable": unattachedDisk.Shareable,
"cache": unattachedDisk.Cache,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),

View File

@@ -75,6 +75,14 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
req.Pool = pool.(string)
}
if cache, ok := d.GetOk("cache"); ok {
req.Cache = cache.(string)
}
if blkDiscard, ok := d.GetOk("blk_discard"); ok {
req.BLKDiscard = blkDiscard.(bool)
}
diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil {
d.SetId("")
@@ -236,6 +244,18 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
}
if d.HasChange("cache") {
if err := resourceDiskChangeCache(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("blk_discard") {
if err := resourceDiskChangeBLKDiscard(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("node_ids") {
log.Debugf("resourceDiskUpdate: present for disk %d", d.Get("disk_id"))
if err := resourceDiskChangeNodes(ctx, d, m, false); err != nil {
@@ -362,6 +382,26 @@ func resourceDiskChangeStoragePolicyID(ctx context.Context, d *schema.ResourceDa
return err
}
func resourceDiskChangeCache(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Update(ctx, disks.UpdateRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Cache: d.Get("cache").(string),
})
return err
}
func resourceDiskChangeBLKDiscard(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Update(ctx, disks.UpdateRequest{
DiskID: uint64(d.Get("disk_id").(int)),
BLKDiscard: d.Get("blk_discard").(bool),
})
return err
}
func resourceDiskChangeNodes(ctx context.Context, d *schema.ResourceData, m interface{}, afterCreate bool) error {
c := m.(*controller.ControllerCfg)
diskID := uint64(d.Get("disk_id").(int))

View File

@@ -22,6 +22,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -281,6 +285,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -456,6 +464,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -719,6 +731,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -880,6 +896,10 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -1143,6 +1163,10 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -1400,11 +1424,6 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
@@ -1427,6 +1446,10 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -1659,6 +1682,10 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -1964,12 +1991,23 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
Description: "Cache mode for the disk",
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
@@ -2569,6 +2607,10 @@ func dataSourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -2939,6 +2981,10 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -332,15 +332,15 @@ func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *control
}
func handleMigrateUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
stackId := uint64(d.Get("migrate").(int))
nodeId := uint64(d.Get("migrate").(int))
if err := ic.ExistStack(ctx, stackId, c); err != nil {
if err := ic.ExistNode(ctx, nodeId, c); err != nil {
return err
}
req := extnet.DeviceMigrateRequest{
NetID: recNet.ID,
StackID: stackId,
NetID: recNet.ID,
NodeID: nodeId,
}
_, err := c.CloudBroker().ExtNet().DeviceMigrate(ctx, req)

View File

@@ -63,8 +63,6 @@ func flattenFlipgroup(d *schema.ResourceData, flip *flipgroup.RecordFLIPGroup) {
d.Set("net_id", flip.NetID)
d.Set("net_type", flip.NetType)
d.Set("network", flip.Network)
d.Set("rg_id", flip.RGID)
d.Set("rg_name", flip.RGName)
d.Set("status", flip.Status)
d.Set("updated_by", flip.UpdatedBy)
d.Set("updated_time", flip.UpdatedTime)

View File

@@ -123,16 +123,6 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -179,11 +169,6 @@ func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "by_ip",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "rg_id",
},
"by_id": {
Type: schema.TypeInt,
Optional: true,
@@ -464,16 +449,6 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,

View File

@@ -63,9 +63,6 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
if byIp, ok := d.GetOk("by_ip"); ok {
req.ByIP = byIp.(string)
}
if rgId, ok := d.GetOk("rg_id"); ok {
req.RGID = uint64(rgId.(int))
}
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}

View File

@@ -73,7 +73,6 @@ func flattenGridRecordResource(rr grid.RecordResource) []map[string]interface{}
"disk_size": rr.DiskSize,
"disk_size_max": rr.DiskSizeMax,
"ext_ips": rr.ExtIPs,
"ext_traffic": rr.ExtTraffic,
"gpu": rr.GPU,
"ram": rr.RAM,
"seps": flattenGridSeps(rr.SEPs),

View File

@@ -124,10 +124,6 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -184,10 +180,6 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -338,10 +330,6 @@ func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -398,10 +386,6 @@ func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -469,10 +453,6 @@ func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -529,10 +509,6 @@ func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -15,8 +15,8 @@ import (
cb_k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
cb_lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
cb_node "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node"
cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
cb_trunk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/trunk"
cb_vfpool "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool"
cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
@@ -467,53 +467,23 @@ func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) erro
return fmt.Errorf("GID with id %v not found", gid)
}
func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg) error {
req := cb_stack.ListRequest{
ByID: stackId,
func ExistNode(ctx context.Context, nodeId uint64, c *controller.ControllerCfg) error {
req := cb_node.ListRequest{
ByID: nodeId,
}
stackList, err := c.CloudBroker().Stack().List(ctx, req)
nodeList, err := c.CloudBroker().Node().List(ctx, req)
if err != nil {
return err
}
if len(stackList.Data) == 0 {
return fmt.Errorf("stack with id %v not found", stackList)
if len(nodeList.Data) == 0 {
return fmt.Errorf("node with id %v not found", nodeList)
}
return nil
}
// ExistStackInPcidevice checks if compute exists with specified stackId and specified non-deleted rgId.
func ExistStackInPcidevice(ctx context.Context, stackId, rgId uint64, c *controller.ControllerCfg) error {
req := cb_rg.ListRequest{
ByID: rgId,
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return err
}
for _, v := range rgList.Data {
for _, idVM := range v.VMs {
req := cb_compute.GetRequest{
ComputeID: idVM,
}
computeRec, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return err
}
if computeRec.StackID == stackId {
return nil
}
}
}
return fmt.Errorf("no compute found with stack_id %v and rg_id %v", stackId, rgId)
}
func ExistLB(ctx context.Context, lbId uint64, c *controller.ControllerCfg) error {
req := cb_lb.ListRequest{

View File

@@ -1,69 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageListStacks(imageListStacks))
d.Set("entry_count", imageListStacks.EntryCount)
return nil
}
func DataSourceImageListStacks() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListStacksRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListStacksSchemaMake(),
}
}

View File

@@ -161,40 +161,6 @@ func flattenEco(m interface{}) string {
return output
}
func flattenImageListStacks(stack *image.ListStacks) []map[string]interface{} {
log.Debug("flattenImageListStacks")
temp := make([]map[string]interface{}, 0, len(stack.Data))
for _, item := range stack.Data {
t := map[string]interface{}{
"ckey": item.CKey,
//"meta": flattens.FlattenMeta(item.Meta),
"api_url": item.APIURL,
"api_key": item.APIKey,
"app_id": item.AppID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"desc": item.Description,
"descr": item.Descr,
"drivers": item.Drivers,
"eco": flattenEco(item.Eco),
"error": item.Error,
"gid": item.GID,
"guid": item.GUID,
"id": item.ID,
"images": item.Images,
"login": item.Login,
"mem_allocation_ratio": item.MemAllocationRatio,
"name": item.Name,
"packages": flattenPackages(item.Packages),
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
}
temp = append(temp, t)
}
return temp
}
func flattenPackages(pg image.Packages) []map[string]interface{} {
log.Debug("flattenPackages")
res := make([]map[string]interface{}, 0)

View File

@@ -36,291 +36,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func dataSourceImageListStacksSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"image_id": {
Type: schema.TypeInt,
Required: true,
Description: "image id",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "find by name",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "find by status",
},
"type_image": {
Type: schema.TypeString,
Optional: true,
Description: "find by type",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"api_url": {
Type: schema.TypeString,
Computed: true,
},
"api_key": {
Type: schema.TypeString,
Computed: true,
},
"app_id": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"descr": {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"eco": {
Type: schema.TypeString,
Computed: true,
},
"error": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"libvirt_bin": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_bin",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"libvirt_daemon": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_daemon",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"lvm2_lockd": {
Type: schema.TypeList,
Computed: true,
Description: "lvm2_lockd",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_common": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_common",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_switch": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_switch",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"qemu_system_x86": {
Type: schema.TypeList,
Computed: true,
Description: "qemu_system_x86",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"sanlock": {
Type: schema.TypeList,
Computed: true,
Description: "sanlock",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
},
},
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
Description: "items of stacks list",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
}
func dataSourceImageListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"sep_id": {
@@ -1017,10 +732,6 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "unc path",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,
@@ -1531,10 +1242,6 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "unc path",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,

View File

@@ -1,76 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// utilityImageListStacksCheckPresence builds an image.ListStacksRequest from
// the Terraform resource data and executes the CloudBroker Image.ListStacks
// API call.
//
// The required image_id argument is always sent; the remaining filters
// (sort_by, page, size, name, status, type_image) are attached only when set
// in the schema, so absent arguments impose no empty filter on the API.
//
// Returns the list of stacks the image is present on, or the API error.
func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) {
	c := m.(*controller.ControllerCfg)

	req := image.ListStacksRequest{
		ImageID: uint64(d.Get("image_id").(int)),
	}

	// Optional filters: only populate fields the user actually configured.
	if sortBy, ok := d.GetOk("sort_by"); ok {
		req.SortBy = sortBy.(string)
	}
	if page, ok := d.GetOk("page"); ok {
		req.Page = uint64(page.(int))
	}
	if size, ok := d.GetOk("size"); ok {
		req.Size = uint64(size.(int))
	}
	if name, ok := d.GetOk("name"); ok {
		req.Name = name.(string)
	}
	if status, ok := d.GetOk("status"); ok {
		req.Status = status.(string)
	}
	if typeImage, ok := d.GetOk("type_image"); ok {
		req.Type = typeImage.(string)
	}

	// Fixed misleading message: this loads the stack list for an image, not
	// an image list; include the image ID for easier troubleshooting.
	log.Debugf("utilityImageListStacksCheckPresence: load stack list for image ID %d", req.ImageID)
	imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req)
	if err != nil {
		return nil, err
	}

	return imageListStacks, nil
}

View File

@@ -1466,12 +1466,14 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"lb_sysctl_params": {
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"lb_sysctl_params": {
Type: schema.TypeList,
Optional: true,
Description: "Custom sysctl values for Load Balancer instance. Applied on boot.",
@@ -1808,17 +1810,19 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
"labels": {
Type: schema.TypeList,
Computed: true,

View File

@@ -78,7 +78,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("numa_node_id", computeRec.NumaNodeId)
d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.PinnedToStack)
d.Set("pinned", computeRec.PinnedToNode)
d.Set("preferred_cpu", computeRec.PreferredCPU)
d.Set("ram", computeRec.RAM)
d.Set("reference_id", computeRec.ReferenceID)
@@ -88,9 +88,9 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("rg_name", computeRec.RGName)
d.Set("rg_id", computeRec.RGID)
d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
d.Set("stack_id", computeRec.StackID)
d.Set("stack_name", computeRec.StackName)
d.Set("started", computeRec.TechStatus == "STARTED")
d.Set("node_id", computeRec.NodeID)
d.Set("node_name", computeRec.NodeName)
d.Set("stateless_sep_id", computeRec.StatelessSEPID)
d.Set("stateless_sep_type", computeRec.StatelessSEPType)
d.Set("status", computeRec.Status)
@@ -100,11 +100,9 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", computeRec.UserManaged)
d.Set("read_only", computeRec.ReadOnly)
d.Set("vnc_password", computeRec.VNCPassword)
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
//TODO
// d.Set("virtual_image_id", computeRec.VirtualImageID)
// d.Set("virtual_image_name", computeRec.VirtualImageName)
d.Set("pci_devices", flattenPCI(*pciList))
d.Set("loader_type", computeRec.LoaderType)
d.Set("boot_type", computeRec.BootType)
@@ -286,6 +284,8 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
"delete_by": disk.DeletedBy,
"delete_time": disk.DeletedTime,
"update_time": disk.UpdatedTime,
"cache": disk.Cache,
"blk_discard": disk.BLKDiscard,
}
res = append(res, temp)
indexDataDisks++
@@ -327,79 +327,78 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"boot_image_id": computeItem.BootImageID,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"live_migration_job_id": computeItem.LiveMigrationJobID,
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"nid": computeItem.NID,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToStack,
"preferred_cpu": computeItem.PreferredCPU,
"qemu_guest": flattenQemuQuest(computeItem.QemuQuest),
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stack_name": computeItem.StackName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
//TODO
// "virtual_image_id": computeItem.VirtualImageID,
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"boot_image_id": computeItem.BootImageID,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"live_migration_job_id": computeItem.LiveMigrationJobID,
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"nid": computeItem.NID,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToNode,
"preferred_cpu": computeItem.PreferredCPU,
"qemu_guest": flattenQemuQuest(computeItem.QemuQuest),
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"node_id": computeItem.NodeID,
"node_name": computeItem.NodeName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"read_only": computeItem.ReadOnly,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"loader_type": computeItem.LoaderType,
"boot_type": computeItem.BootType,
"hot_resize": computeItem.HotResize,
@@ -419,78 +418,74 @@ func flattenDeletedComputeList(computes *compute.ListDeletedComputes) []map[stri
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"boot_image_id": computeItem.BootImageID,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
//TODO
// "image_id": computeItem.ImageID,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToStack,
"preferred_cpu": computeItem.PreferredCPU,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stack_name": computeItem.StackName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
//TODO
// "virtual_image_id": computeItem.VirtualImageID,
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"boot_image_id": computeItem.BootImageID,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToNode,
"preferred_cpu": computeItem.PreferredCPU,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"node_id": computeItem.NodeID,
"node_name": computeItem.NodeName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"loader_type": computeItem.LoaderType,
"boot_type": computeItem.BootType,
"hot_resize": computeItem.HotResize,
@@ -574,6 +569,7 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
// "bus_number": disk.BusNumber,
"disk_id": disk.ID,
// "pci_slot": disk.PCISlot,
"sep_id": disk.SepID,
}
res = append(res, temp)
}
@@ -690,7 +686,7 @@ func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface
"device_id": dev.ID,
"name": dev.Name,
"rg_id": dev.RGID,
"stack_id": dev.StackID,
"node_id": dev.NodeID,
"status": dev.Status,
"system_name": dev.SystemName,
}
@@ -809,7 +805,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("numa_affinity", compFacts.NumaAffinity)
d.Set("numa_node_id", compFacts.NumaNodeId)
d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
d.Set("pinned", compFacts.PinnedToStack)
d.Set("pinned", compFacts.PinnedToNode)
d.Set("preferred_cpu", compFacts.PreferredCPU)
d.Set("qemu_guest", flattenQemuQuest(compFacts.QemuQuest))
d.Set("ram", compFacts.RAM)
@@ -820,8 +816,8 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("rg_id", compFacts.RGID)
d.Set("rg_name", compFacts.RGName)
d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
d.Set("stack_id", compFacts.StackID)
d.Set("stack_name", compFacts.StackName)
d.Set("node_id", compFacts.NodeID)
d.Set("node_name", compFacts.NodeName)
d.Set("stateless_sep_id", compFacts.StatelessSEPID)
d.Set("stateless_sep_type", compFacts.StatelessSEPType)
d.Set("status", compFacts.Status)
@@ -831,11 +827,9 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("updated_time", compFacts.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", compFacts.UserManaged)
d.Set("read_only", compFacts.ReadOnly)
d.Set("vnc_password", compFacts.VNCPassword)
d.Set("vgpus", flattenVGPUs(compFacts.VGPUs))
//TODO
// d.Set("virtual_image_id", compFacts.VirtualImageID)
// d.Set("virtual_image_name", compFacts.VirtualImageName)
d.Set("pci_devices", flattenPCI(*pciList))
d.Set("loader_type", compFacts.LoaderType)
d.Set("boot_type", compFacts.BootType)
@@ -872,6 +866,7 @@ func parseComputeInterfacesToNetworks(networks []interface{}, ifaces compute.Lis
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
elem["mtu"] = value.MTU
elem["net_mask"] = value.NetMask
elem["sdn_interface_id"] = value.SDNInterfaceID
elem["weight"] = flattenNetworkWeight(networks, value.NetID, value.NetType)
elem["enabled"] = value.Enabled
@@ -900,6 +895,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"ckey": disk.CKey,
"meta": flattens.FlattenMeta(disk.Meta),
"account_id": disk.AccountID,
"blk_discard": disk.BLKDiscard,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,

View File

@@ -73,8 +73,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.Pool = pool.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
createReqX86.StackID = uint64(stackID.(int))
if nodeID, ok := d.GetOk("node_id"); ok {
createReqX86.NodeID = uint64(nodeID.(int))
}
if ipaType, ok := d.GetOk("ipa_type"); ok {
@@ -85,6 +85,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.BootDisk = uint64(bootSize.(int))
}
if bootDiskCache, ok := d.GetOk("boot_disk_cache"); ok {
createReqX86.BootDiskCache = bootDiskCache.(string)
}
if IS, ok := d.GetOk("is"); ok {
createReqX86.IS = IS.(string)
}
@@ -93,6 +97,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.ZoneID = uint64(zoneID.(int))
}
if bootDiskBLKDiscard, ok := d.GetOk("boot_disk_blk_discard"); ok {
createReqX86.BootDiskBLKDiscard = bootDiskBLKDiscard.(bool)
}
createReqX86.Interfaces = make([]kvmx86.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
@@ -126,6 +134,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
reqInterface.MTU = uint64(netInterfaceVal["mtu"].(int))
}
if reqInterface.NetType == "DPDK" || reqInterface.NetType == "VFNIC" {
if netMask, netMaskSet := netInterfaceVal["net_mask"]; netMaskSet {
reqInterface.NetMask = uint64(netMask.(int))
}
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
@@ -170,6 +184,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if imageID, ok := diskVal["image_id"]; ok {
reqDataDisk.ImageID = uint64(imageID.(int))
}
if cache, ok := diskVal["cache"]; ok {
reqDataDisk.Cache = cache.(string)
}
if blkDiscard, ok := diskVal["blk_discard"]; ok {
reqDataDisk.BLKDiscard = blkDiscard.(bool)
}
disksX86 = append(disksX86, reqDataDisk)
}
@@ -328,6 +348,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
if !cleanup {
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
req := compute.EnableRequest{ComputeID: computeId}
@@ -424,6 +445,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if start, ok := d.GetOk("started"); ok && start.(bool) {
req := compute.StartRequest{ComputeID: computeId}
if nodeID, ok := d.GetOk("node_id"); ok {
req.NodeID = uint64(nodeID.(int))
}
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
warnings.Add(err)
@@ -436,19 +460,16 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) {
if pin, ok := d.GetOk("pin_to_node"); ok && pin.(bool) {
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
_, nodeOk := d.GetOk("node_id")
if !start.(bool) && !stackOk {
warnings.Add(errors.New("cannot pin to stack a VM, that is not started and stack_id is not set"))
if !start.(bool) && !nodeOk {
warnings.Add(errors.New("cannot pin to node a VM, that is not started and node_id is not set"))
} else {
req := compute.PinToStackRequest{
ComputeID: computeId,
}
if stackID, ok := d.Get("stack_id").(int); ok {
req.TargetStackID = uint64(stackID)
req := compute.PinToNodeRequest{
ComputeID: computeId,
TargetNodeID: uint64(d.Get("node_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
@@ -459,7 +480,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
_, err := c.CloudBroker().Compute().PinToNode(ctx, req)
if err != nil {
warnings.Add(err)
}
@@ -628,7 +649,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if !d.Get("pin_to_stack").(bool) && d.Get("auto_start_w_node").(bool) {
if !d.Get("pin_to_node").(bool) && d.Get("auto_start_w_node").(bool) {
req := compute.UpdateRequest{
ComputeID: computeId,
AutoStart: d.Get("auto_start_w_node").(bool),
@@ -658,6 +679,14 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if readOnly, ok := d.GetOk("read_only"); ok {
if readOnly.(bool) {
if err := utilityComputeUpdateReadOnly(ctx, d, m); err != nil {
warnings.Add(err)
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -805,6 +834,18 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("boot_disk_cache") {
if err := utilityComputeUpdateBootDiskCache(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("boot_disk_blk_discard") {
if err := utilityComputeUpdateBootDiskBLKDiscard(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("extra_disks") {
err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
if err != nil {
@@ -812,8 +853,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("pin_to_stack") {
if err := utilityComputePinToStack(ctx, d, m); err != nil {
if d.HasChange("pin_to_node") {
if err := utilityComputePinToNode(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
@@ -946,6 +987,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("read_only") {
if err := utilityComputeUpdateReadOnly(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(resourceComputeRead(ctx, d, m), warnings.Get()...)
}
@@ -980,12 +1027,12 @@ func ResourceCompute() *schema.Resource {
SchemaVersion: 2,
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "libvirt_settings", "network", "affinity_rules", "anti_affinity_rules",
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_node", "auto_start_w_node", "libvirt_settings", "network", "affinity_rules", "anti_affinity_rules",
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu", "security_groups") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}
if diff.HasChanges("pin_to_stack") {
if diff.HasChanges("pin_to_node") {
diff.SetNewComputed("pinned")
}
if diff.HasChanges("started") {

View File

@@ -228,6 +228,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -236,6 +240,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -939,11 +947,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
},
@@ -1011,6 +1019,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is locked to read-only operations.",
},
"vnc_password": {
Type: schema.TypeString,
Computed: true,
@@ -1100,14 +1113,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_name": {
Type: schema.TypeString,
Computed: true,
},
//extra parameters
"boot_disk_size": {
Type: schema.TypeInt,
@@ -1181,7 +1186,7 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by tech status",
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by node name.",
@@ -1196,10 +1201,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by IP address",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by stack ID",
Description: "Find by node ID",
},
"cd_image_id": {
Type: schema.TypeInt,
@@ -1460,6 +1465,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
@@ -1483,10 +1492,6 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
@@ -1829,11 +1834,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Find by node name.",
@@ -1890,6 +1895,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is in read-only mode.",
},
"vgpus": {
Type: schema.TypeList,
Computed: true,
@@ -1901,10 +1911,6 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"loader_type": {
Type: schema.TypeString,
Computed: true,
@@ -2238,10 +2244,6 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
@@ -2545,11 +2547,11 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Find by node name.",
@@ -2617,10 +2619,6 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"loader_type": {
Type: schema.TypeString,
Computed: true,
@@ -3180,7 +3178,7 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -3406,12 +3404,24 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "If True, the imageId, bootDisk, sepId, pool parameters are ignored and the compute is created without a boot disk in the stopped state.",
},
"boot_disk_blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"boot_disk_size": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
},
"boot_disk_cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
ValidateFunc: validation.StringInSlice([]string{"none", "writethrough"}, false),
Description: "Setting the boot disk caching mode",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
@@ -3447,11 +3457,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "ID of CD-ROM live image to boot",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "ID of stack to start compute",
Description: "ID of node to start compute",
},
"is": {
Type: schema.TypeString,
@@ -3521,16 +3531,22 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "unique_identifier of LogicalPort on SDN side",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"net_mask": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Subnet mask, used only for DPDK and VFNIC network types",
},
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"libvirt_settings": {
Type: schema.TypeSet,
@@ -3654,7 +3670,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -3695,7 +3711,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -3704,8 +3720,16 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
"disks": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
ValidateFunc: validation.StringInSlice([]string{"none", "writethrough"}, false),
Description: "Setting the disk caching mode",
},
"disk_name": {
Type: schema.TypeString,
Required: true,
@@ -3721,6 +3745,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Storage policy id of disk. The rules of the specified storage policy will be used.",
},
"blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
@@ -3927,7 +3956,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"pin_to_stack": {
"pin_to_node": {
Type: schema.TypeBool,
Optional: true,
Default: false,
@@ -4487,10 +4516,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the stack, on which VM started",
Description: "Name of the node, on which VM started",
},
"stateless_sep_id": {
Type: schema.TypeInt,
@@ -4520,6 +4549,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Sets read-only mode for this compute. Only data operations allowed when enabled.",
},
"vnc_password": {
Type: schema.TypeString,
Computed: true,
@@ -4609,10 +4644,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_name": {
Type: schema.TypeString,
Computed: true,

View File

@@ -35,14 +35,17 @@ import (
"context"
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cty/cty"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -88,8 +91,8 @@ func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interf
if altBootId, ok := d.Get("alt_boot_id").(int); ok {
req.AltBootID = uint64(altBootId)
}
if stackId, ok := d.Get("stack_id").(int); ok {
req.StackID = uint64(stackId)
if nodeId, ok := d.Get("node_id").(int); ok {
req.NodeID = uint64(nodeId)
}
if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
return err
@@ -108,6 +111,28 @@ func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interf
return nil
}
// utilityComputeUpdateReadOnly pushes the current value of the "read_only"
// schema attribute to the platform via the ChangeReadOnly cloudbroker call.
// The compute ID is taken from the resource ID stored in state.
func utilityComputeUpdateReadOnly(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	ctrl := m.(*controller.ControllerCfg)

	id, parseErr := strconv.ParseUint(d.Id(), 10, 64)
	if parseErr != nil {
		return parseErr
	}

	readOnly := d.Get("read_only").(bool)
	request := compute.ChangeReadOnlyRequest{
		ComputeID: id,
		ReadOnly:  readOnly,
	}
	if _, callErr := ctrl.CloudBroker().Compute().ChangeReadOnly(ctx, request); callErr != nil {
		return callErr
	}

	log.Debugf("resourceCompute: read_only=%t for Compute ID %d", readOnly, id)
	return nil
}
func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
@@ -235,6 +260,9 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
changeStoragePolicyDisks := make([]interface{}, 0)
cacheUpdatedDisks := make([]interface{}, 0)
blkDiscardUpdatedDisks := make([]interface{}, 0)
migratedDisks := make([]interface{}, 0)
presentNewDisks := make([]interface{}, 0)
presentOldDisks := make([]interface{}, 0)
@@ -280,9 +308,18 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if isRenameDisk(oldConv, el) {
renamedDisks = append(renamedDisks, el)
}
if isChangeStoragePolicy(oldConv, el) {
if isMigrateDisk(oldConv, el) {
migratedDisks = append(migratedDisks, el)
} else if isChangeStoragePolicy(oldConv, el) {
changeStoragePolicyDisks = append(changeStoragePolicyDisks, el)
}
if isChangeCacheDisk(oldConv, el) {
cacheUpdatedDisks = append(cacheUpdatedDisks, el)
}
if isChangeBLKDiscardDisk(oldConv, el) {
blkDiscardUpdatedDisks = append(blkDiscardUpdatedDisks, el)
}
}
if len(deletedDisks) > 0 {
@@ -332,6 +369,9 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
if cacheVal, ok := diskConv["cache"].(string); ok {
req.Cache = cacheVal
}
diskID, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
if err != nil {
return err
@@ -409,6 +449,58 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
if len(cacheUpdatedDisks) > 0 {
for _, disk := range cacheUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
req := disks.UpdateRequest{
DiskID: diskID,
Cache: diskConv["cache"].(string),
}
_, err := c.CloudBroker().Disks().Update(ctx, req)
if err != nil {
return err
}
}
}
if len(blkDiscardUpdatedDisks) > 0 {
for _, disk := range blkDiscardUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
req := disks.UpdateRequest{
DiskID: diskID,
BLKDiscard: diskConv["blk_discard"].(bool),
}
_, err := c.CloudBroker().Disks().Update(ctx, req)
if err != nil {
return err
}
}
}
if len(migratedDisks) > 0 {
if err := utilityComputeMigrateDisks(ctx, d, m, migratedDisks, oldConv); err != nil {
return err
}
}
for i := range presentNewDisks {
newDisk := presentNewDisks[i].(map[string]interface{})
oldDisk := presentOldDisks[i].(map[string]interface{})
@@ -449,6 +541,56 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
return nil
}
// utilityComputeUpdateBootDiskCache applies the "boot_disk_cache" schema value
// to the compute's boot disk through the Disks Update API.
// It fails when the boot disk ID is not yet known in state (zero/unset).
func utilityComputeUpdateBootDiskCache(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)

	cacheMode := d.Get("boot_disk_cache").(string)

	var diskID uint64
	if raw, ok := d.GetOk("boot_disk_id"); ok {
		if intID, isInt := raw.(int); isInt {
			diskID = uint64(intID)
		}
	}
	if diskID == 0 {
		return fmt.Errorf("cannot update boot_disk_cache: boot disk ID is unknown for compute %s", d.Id())
	}

	_, err := c.CloudBroker().Disks().Update(ctx, disks.UpdateRequest{
		DiskID: diskID,
		Cache:  cacheMode,
	})
	return err
}
// utilityComputeUpdateBootDiskBLKDiscard applies the "boot_disk_blk_discard"
// schema value to the compute's boot disk through the Disks Update API.
// It fails when the boot disk ID is not yet known in state (zero/unset).
func utilityComputeUpdateBootDiskBLKDiscard(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)

	newBLKDiscard := d.Get("boot_disk_blk_discard").(bool)

	var bootDiskID uint64
	if v, ok := d.GetOk("boot_disk_id"); ok {
		if id, ok := v.(int); ok {
			bootDiskID = uint64(id)
		}
	}
	if bootDiskID == 0 {
		// Fixed copy-paste: the original message referenced boot_disk_cache
		// even though this function updates boot_disk_blk_discard.
		return fmt.Errorf("cannot update boot_disk_blk_discard: boot disk ID is unknown for compute %s", d.Id())
	}

	req := disks.UpdateRequest{
		DiskID:     bootDiskID,
		BLKDiscard: newBLKDiscard,
	}
	_, err := c.CloudBroker().Disks().Update(ctx, req)
	return err
}
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg)
@@ -709,6 +851,12 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
req.MTU = uint64(netData["mtu"].(int))
}
if req.NetType == "DPDK" || req.NetType == "VFNIC" {
if netMask, ok := netData["net_mask"].(int); ok && netMask > 0 {
req.NetMask = uint64(netMask)
}
}
if netData["sdn_interface_id"].(string) != "" {
req.SDNInterfaceID = netData["sdn_interface_id"].(string)
}
@@ -1613,33 +1761,33 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
return nil
}
func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error {
func utilityComputePinToNode(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
if !start.(bool) && !stackOk {
errors.New("cannot pin to stack a VM, that is not started and stack_id is not set")
}
oldPin, newPin := d.GetChange("pin_to_stack")
oldPin, newPin := d.GetChange("pin_to_node")
if oldPin.(bool) && !newPin.(bool) {
req := compute.UnpinFromStackRequest{
req := compute.UnpinFromNodeRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req)
_, err := c.CloudBroker().Compute().UnpinFromNode(ctx, req)
if err != nil {
return err
}
}
if !oldPin.(bool) && newPin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("stack_id").(int)),
start, _ := d.GetOk("started")
_, nodeOk := d.GetOk("node_id")
if !start.(bool) && !nodeOk {
return errors.New("cannot pin to node a VM, that is not started and node_id is not set")
}
req := compute.PinToNodeRequest{
ComputeID: computeId,
TargetNodeID: uint64(d.Get("node_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
@@ -1650,7 +1798,7 @@ func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m int
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
_, err := c.CloudBroker().Compute().PinToNode(ctx, req)
if err != nil {
return err
}
@@ -1905,6 +2053,30 @@ func isRenameDisk(els []interface{}, el interface{}) bool {
return false
}
// isChangeCacheDisk reports whether el has the same disk_id as one of the
// previously known disks in els but a different cache mode.
func isChangeCacheDisk(els []interface{}, el interface{}) bool {
	cur := el.(map[string]interface{})
	for _, old := range els {
		prev := old.(map[string]interface{})
		sameDisk := prev["disk_id"].(int) == cur["disk_id"].(int)
		cacheDiffers := prev["cache"].(string) != cur["cache"].(string)
		if sameDisk && cacheDiffers {
			return true
		}
	}
	return false
}
// isChangeBLKDiscardDisk reports whether el has the same disk_id as one of the
// previously known disks in els but a different blk_discard flag.
func isChangeBLKDiscardDisk(els []interface{}, el interface{}) bool {
	cur := el.(map[string]interface{})
	for _, old := range els {
		prev := old.(map[string]interface{})
		if prev["disk_id"].(int) != cur["disk_id"].(int) {
			continue
		}
		if prev["blk_discard"].(bool) != cur["blk_discard"].(bool) {
			return true
		}
	}
	return false
}
func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
@@ -1917,6 +2089,22 @@ func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
return false
}
// isMigrateDisk reports whether el refers to an existing disk (matching,
// non-zero disk_id present in els) whose SEP or pool differs from the old
// value, i.e. the disk must be migrated to other storage.
func isMigrateDisk(els []interface{}, el interface{}) bool {
	cur := el.(map[string]interface{})
	curID := cur["disk_id"].(int)
	for _, old := range els {
		prev := old.(map[string]interface{})
		if curID == 0 || prev["disk_id"].(int) != curID {
			continue
		}
		sepChanged := prev["sep_id"].(int) != cur["sep_id"].(int)
		poolChanged := prev["pool"].(string) != cur["pool"].(string)
		if sepChanged || poolChanged {
			return true
		}
	}
	return false
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
@@ -1948,6 +2136,105 @@ func isChangeNodesDisk(els []interface{}, elOld interface{}) (bool, interface{})
return false, nil
}
// utilityComputeMigrateDisks migrates every disk listed in migratedDisks to
// the SEP/pool/storage-policy given in its plan entry, waiting for each
// migration task to complete before starting the next one.
// NOTE(review): d and oldDisks are currently unused in the body — kept for
// signature compatibility with the call site; confirm before removing.
func utilityComputeMigrateDisks(ctx context.Context, d *schema.ResourceData, m interface{}, migratedDisks, oldDisks []interface{}) error {
	c := m.(*controller.ControllerCfg)

	for _, item := range migratedDisks {
		spec := item.(map[string]interface{})

		srcDiskID := uint64(spec["disk_id"].(int))
		if srcDiskID == 0 {
			// New disks have no ID yet and cannot be migrated.
			log.Debugf("utilityComputeMigrateDisks: skipping disk with id=0")
			continue
		}

		targetSepID := uint64(spec["sep_id"].(int))
		targetPool := spec["pool"].(string)
		policyID := uint64(spec["storage_policy_id"].(int))

		log.Debugf("utilityComputeMigrateDisks: migrating disk_id=%d to sep_id=%d, pool=%s", srcDiskID, targetSepID, targetPool)

		taskID, err := c.CloudBroker().Disks().Migrate(ctx, disks.MigrateRequest{
			DiskID:          srcDiskID,
			SEPID:           targetSepID,
			PoolName:        targetPool,
			StoragePolicyID: policyID,
		})
		if err != nil {
			return fmt.Errorf("failed to start disk migration for disk_id=%d: %w", srcDiskID, err)
		}
		log.Debugf("utilityComputeMigrateDisks: disk migration started, taskID=%s", taskID)

		newDiskID, err := utilityComputeWaitForMigrationTask(ctx, c, taskID, srcDiskID)
		if err != nil {
			return fmt.Errorf("disk migration task failed for disk_id=%d: %w", srcDiskID, err)
		}
		log.Debugf("utilityComputeMigrateDisks: disk migration completed, old_disk_id=%d, new_disk_id=%d", srcDiskID, newDiskID)
	}
	return nil
}
// utilityComputeWaitForMigrationTask polls the migration task identified by
// taskID every 15 seconds until it completes, then parses the task result and
// returns the new disk ID. oldDiskID is used only for error context.
//
// Fix over the original: the loop now honors ctx cancellation instead of
// sleeping unconditionally forever, so a cancelled Terraform operation does
// not leave this goroutine polling indefinitely.
func utilityComputeWaitForMigrationTask(ctx context.Context, c *controller.ControllerCfg, taskID string, oldDiskID uint64) (uint64, error) {
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return 0, fmt.Errorf("waiting for migration of disk_id=%d interrupted: %w", oldDiskID, ctx.Err())
		case <-ticker.C:
		}

		taskReq := tasks.GetRequest{
			// Task IDs may arrive JSON-quoted; strip surrounding quotes.
			AuditID: strings.Trim(taskID, `"`),
		}
		taskInfo, err := c.CloudBroker().Tasks().Get(ctx, taskReq)
		if err != nil {
			return 0, fmt.Errorf("failed to get task status: %w", err)
		}
		log.Debugf("utilityComputeWaitForMigrationTask: taskID=%s, completed=%t, status=%s", taskID, taskInfo.Completed, taskInfo.Status)

		if !taskInfo.Completed {
			continue
		}
		if taskInfo.Error != "" {
			return 0, fmt.Errorf("migration task failed with error: %s", taskInfo.Error)
		}
		resultStr, err := taskInfo.Result.ToString()
		if err != nil {
			return 0, fmt.Errorf("failed to get task result: %w", err)
		}
		log.Debugf("utilityComputeWaitForMigrationTask: migration result: %s", resultStr)

		newDiskID, err := extractNewDiskIDFromResult(resultStr)
		if err != nil {
			return 0, fmt.Errorf("failed to parse migration result: %w", err)
		}
		return newDiskID, nil
	}
}
// migrationResultRe matches the human-readable success message of a disk
// migration task and captures the destination disk ID. Compiled once at
// package scope instead of on every call (the original recompiled per call).
var migrationResultRe = regexp.MustCompile(`Disk ID \d+ successfully migrated to Disk ID (\d+)`)

// extractNewDiskIDFromResult parses the migration task result string and
// returns the ID of the newly created disk.
// It returns an error when the message does not match the expected format or
// the captured ID cannot be parsed as uint64.
func extractNewDiskIDFromResult(result string) (uint64, error) {
	matches := migrationResultRe.FindStringSubmatch(result)
	if len(matches) < 2 {
		return 0, fmt.Errorf("could not extract new disk ID from result: %s", result)
	}
	newDiskID, err := strconv.ParseUint(matches[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse new disk ID: %w", err)
	}
	return newDiskID, nil
}
func isContainsAR(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})

View File

@@ -69,11 +69,11 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
if ipAddress, ok := d.GetOk("ip_address"); ok {
req.IPAddress = ipAddress.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
req.StackID = stackID.(uint64)
if nodeID, ok := d.GetOk("node_id"); ok {
req.NodeID = nodeID.(uint64)
}
if stackName, ok := d.GetOk("stack_name"); ok {
req.StackName = stackName.(string)
if nodeName, ok := d.GetOk("node_name"); ok {
req.NodeName = nodeName.(string)
}
if cdImageID, ok := d.GetOk("cd_image_id"); ok {
req.CDImageID = cdImageID.(uint64)

View File

@@ -1301,6 +1301,7 @@ func lbResourceSchemaMake() map[string]*schema.Schema {
"start": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"desc": {
Type: schema.TypeString,

View File

@@ -43,9 +43,13 @@ import (
func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
log.Debugf("flattenNode: decoded node id %d", d.Get("node_id").(int))
d.Set("auto_start", item.AutoStart)
d.Set("auto_start_count", item.AutoStartCount)
d.Set("consumption", flattenConsumption(item.Consumption))
d.Set("cpu_info", flattenCpuInfo(item.CpuInfo))
d.Set("cpu_allocation_ratio", item.CPUAllocationRatio)
d.Set("mem_allocation_ratio", item.MemAllocationRatio)
d.Set("description", item.Description)
d.Set("dpdk", flattenDPDKItem(item.DPDK))
d.Set("gid", item.GID)
d.Set("ipaddr", item.IPAddr)
@@ -54,12 +58,14 @@ func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
d.Set("need_reboot", item.NeedReboot)
d.Set("net_addr", flattenGetNetAddr(item.NetAddr))
d.Set("network_mode", item.NetworkMode)
d.Set("openvswitch_bridges", item.OpenvSwitchBridges)
d.Set("nic_info", flattenNicInfo(item.NicInfo))
d.Set("numa_topology", flattenNumaTopology(item.NumaTopology))
d.Set("reserved_cpus", flattenNodeItem(item.ReservedCPUs))
d.Set("roles", item.Roles)
d.Set("sdn_hypervisor_name", item.SDNHypervisorName)
d.Set("sriov_enabled", item.SriovEnabled)
d.Set("stack_id", item.StackID)
d.Set("node_id", item.ID)
d.Set("status", item.Status)
d.Set("to_active", flattenRole(item.ToActive))
d.Set("to_installing", flattenRole(item.ToInstalling))
@@ -67,6 +73,7 @@ func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
d.Set("to_restricted", flattenRole(item.ToRestricted))
d.Set("version", item.Version)
d.Set("zone_id", item.ZoneID)
d.Set("usable_cpus", item.UsableCPUs)
}
func flattenConsumption(info node.ConsumptionInfo) []map[string]interface{} {
@@ -115,48 +122,57 @@ func flattenNodeList(nodes *node.ListNodes) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(nodes.Data))
for _, item := range nodes.Data {
temp := map[string]interface{}{
"additional_pkgs": flattenNodeItem(item.AdditionalPkgs),
"cpu_info": flattenCpuInfo(item.CpuInfo),
"description": item.Description,
"dpdk": flattenDPDKItem(item.DPDK),
"gid": item.GID,
"guid": item.GUID,
"hostkey": item.HostKey,
"node_id": item.ID,
"ipaddr": item.IPAddr,
"isolated_cpus": flattenNodeItem(item.IsolatedCpus),
"lastcheck": item.LastCheck,
"machine_guid": item.MachineGUID,
"mainboard_sn": item.MainboardSN,
"memory": item.Memory,
"milestones": item.Milestones,
"model": item.Model,
"name": item.Name,
"need_reboot": item.NeedReboot,
"net_addr": flattenNetAddr(item.NetAddr),
"network_mode": item.NetworkMode,
"nic_info": flattenNicInfo(item.NicInfo),
"node_uuid": item.NodeUUID,
"numa_topology": flattenNumaTopology(item.NumaTopology),
"peer_backup": item.PeerBackup,
"peer_log": item.PeerLog,
"peer_stats": item.PeerStats,
"pgpus": item.Pgpus,
"public_keys": item.PublicKeys,
"release": item.Release,
"reserved_cpus": flattenNodeItem(item.ReservedCPUs),
"roles": item.Roles,
"sdn_hypervisor_name": item.SDNHypervisorName,
"seps": item.Seps,
"serial_num": item.SerialNum,
"sriov_enabled": item.SriovEnabled,
"stack_id": item.StackID,
"status": item.Status,
"tags": item.Tags,
"type": item.Type,
"uefi_firmware_file": item.UEFIFirmwareFile,
"version": item.Version,
"zone_id": item.ZoneID,
"additional_pkgs": flattenNodeItem(item.AdditionalPkgs),
"auto_start": item.AutoStart,
"auto_start_count": item.AutoStartCount,
"cpu_info": flattenCpuInfo(item.CpuInfo),
"description": item.Description,
"dpdk": flattenDPDKItem(item.DPDK),
"gid": item.GID,
"guid": item.GUID,
"hostkey": item.HostKey,
"node_id": item.ID,
"ipaddr": item.IPAddr,
"isolated_cpus": flattenNodeItem(item.IsolatedCpus),
"lastcheck": item.LastCheck,
"machine_guid": item.MachineGUID,
"mainboard_sn": item.MainboardSN,
"memory": item.Memory,
"milestones": item.Milestones,
"model": item.Model,
"name": item.Name,
"need_reboot": item.NeedReboot,
"net_addr": flattenNetAddr(item.NetAddr),
"network_mode": item.NetworkMode,
"nic_info": flattenNicInfo(item.NicInfo),
"node_uuid": item.NodeUUID,
"numa_topology": flattenNumaTopology(item.NumaTopology),
"peer_backup": item.PeerBackup,
"peer_log": item.PeerLog,
"peer_stats": item.PeerStats,
"pgpus": item.Pgpus,
"public_keys": item.PublicKeys,
"release": item.Release,
"reserved_cpus": flattenNodeItem(item.ReservedCPUs),
"roles": item.Roles,
"sdn_hypervisor_name": item.SDNHypervisorName,
"seps": item.Seps,
"serial_num": item.SerialNum,
"sriov_enabled": item.SriovEnabled,
"status": item.Status,
"tags": item.Tags,
"type": item.Type,
"uefi_firmware_file": item.UEFIFirmwareFile,
"usable_cpus": item.UsableCPUs,
"version": item.Version,
"zone_id": item.ZoneID,
"openvswitch_bridges": item.OpenvSwitchBridges,
"api_url": item.APIUrl,
"drivers": item.Drivers,
"old_compat_lvm_id": item.OldCompatLVMID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"mem_allocation_ratio": item.MemAllocationRatio,
"packages": flattenPackages(item.Packages),
}
res = append(res, temp)
}
@@ -250,6 +266,8 @@ func flattenCpuInfo(info node.CpuInfo) []map[string]interface{} {
"clock_speed": info.ClockSpeed,
"core_count": info.CoreCount,
"phys_count": info.PhysCount,
"flags": info.Flags,
"model_name": info.ModelName,
}
res[0] = temp
return res
@@ -274,6 +292,20 @@ func flattenNodeItem(m []interface{}) []string {
return output
}
// flattenPackages converts the node package map into a list of schema-shaped
// maps carrying each package's version ("ver") and installed size ("size").
// NOTE(review): Go map iteration order is random, so the resulting list order
// is nondeterministic between reads — confirm the schema tolerates this.
func flattenPackages(pkgs map[string]node.PackageInfo) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(pkgs))
	for _, info := range pkgs {
		out = append(out, map[string]interface{}{
			"ver":  info.Ver,
			"size": info.InstalledSize,
		})
	}
	return out
}
func flattenDPDKItem(dpdk node.DPDK) []map[string]interface{} {
res := make([]map[string]interface{}, 1)
bridges := make([]map[string]interface{}, 1)

View File

@@ -9,6 +9,14 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "node id",
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"auto_start_count": {
Type: schema.TypeInt,
Computed: true,
},
"consumption": {
Type: schema.TypeList,
Computed: true,
@@ -102,11 +110,30 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"flags": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"model_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"cpu_allocation_ratio": {
Type: schema.TypeInt,
Type: schema.TypeFloat,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"dpdk": {
@@ -203,10 +230,24 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
},
},
},
"usable_cpus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"network_mode": {
Type: schema.TypeString,
Computed: true,
},
"openvswitch_bridges": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"nic_info": {
Type: schema.TypeList,
Computed: true,
@@ -317,12 +358,12 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"sriov_enabled": {
Type: schema.TypeBool,
"sdn_hypervisor_name": {
Type: schema.TypeString,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
"sriov_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
@@ -485,6 +526,22 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"api_url": {
Type: schema.TypeString,
Computed: true,
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"auto_start_count": {
Type: schema.TypeInt,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"cpu_info": {
Type: schema.TypeList,
Computed: true,
@@ -502,6 +559,17 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"flags": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"model_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
@@ -509,6 +577,13 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"dpdk": {
Type: schema.TypeList,
Computed: true,
@@ -600,6 +675,10 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"memory": {
Type: schema.TypeInt,
Computed: true,
@@ -662,6 +741,17 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"old_compat_lvm_id": {
Type: schema.TypeInt,
Computed: true,
},
"openvswitch_bridges": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"nic_info": {
Type: schema.TypeList,
Computed: true,
@@ -762,6 +852,22 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
},
},
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ver": {
Type: schema.TypeString,
Computed: true,
},
"size": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"peer_backup": {
Type: schema.TypeInt,
Computed: true,
@@ -825,10 +931,6 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -848,6 +950,13 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"usable_cpus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"version": {
Type: schema.TypeString,
Computed: true,

View File

@@ -4,14 +4,11 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenPcidevice(d *schema.ResourceData, pd *pcidevice.ItemPCIDevice) {
log.Debugf("flattenPCIDevice: ID %d", pd.ID)
d.Set("ckey", pd.CKey)
d.Set("meta", flattens.FlattenMeta(pd.Meta))
d.Set("compute_id", pd.ComputeID)
d.Set("description", pd.Description)
d.Set("guid", pd.GUID)
@@ -19,7 +16,7 @@ func flattenPcidevice(d *schema.ResourceData, pd *pcidevice.ItemPCIDevice) {
d.Set("device_id", pd.ID)
d.Set("name", pd.Name)
d.Set("rg_id", pd.RGID)
d.Set("stack_id", pd.StackID)
d.Set("node_id", pd.NodeID)
d.Set("status", pd.Status)
d.Set("system_name", pd.SystemName)
}
@@ -30,8 +27,6 @@ func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{}
res := make([]map[string]interface{}, 0, len(pl.Data))
for _, item := range pl.Data {
temp := map[string]interface{}{
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"compute_id": item.ComputeID,
"description": item.Description,
"guid": item.GUID,
@@ -39,7 +34,7 @@ func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{}
"device_id": item.ID,
"rg_id": item.RGID,
"name": item.Name,
"stack_id": item.StackID,
"node_id": item.NodeID,
"status": item.Status,
"system_name": item.SystemName,
}

View File

@@ -44,14 +44,14 @@ import (
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
stackId := uint64(d.Get("stack_id").(int))
nodeId := uint64(d.Get("node_id").(int))
rgId := uint64(d.Get("rg_id").(int))
if err := ic.ExistRG(ctx, rgId, c); err != nil {
errs = append(errs, err)
}
if err := ic.ExistStack(ctx, stackId, c); err != nil {
if err := ic.ExistNode(ctx, nodeId, c); err != nil {
errs = append(errs, err)
}

View File

@@ -56,7 +56,7 @@ func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m inte
return diags
}
createReq.StackID = uint64(d.Get("stack_id").(int))
createReq.NodeID = uint64(d.Get("node_id").(int))
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.Name = d.Get("name").(string)
createReq.HWPath = d.Get("hw_path").(string)

View File

@@ -11,17 +11,6 @@ func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
@@ -46,7 +35,7 @@ func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -109,17 +98,6 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
@@ -144,7 +122,7 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -170,11 +148,11 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
func resourcePcideviceSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "stackId",
Description: "Node ID",
},
"rg_id": {
Type: schema.TypeInt,

View File

@@ -70,7 +70,6 @@ func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} {
"cu_dm": rl.CUDM,
"cu_i": rl.CUI,
"cu_m": rl.CUM,
"cu_np": rl.CUNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenRgStoragePolicy(rl.StoragePolicies),
}
@@ -160,7 +159,6 @@ func flattenRgUsageResource(d *schema.ResourceData, usage rg.Reservation) {
d.Set("disk_size", usage.DiskSize)
d.Set("disk_size_max", usage.DiskSizeMax)
d.Set("extips", usage.ExtIPs)
d.Set("exttraffic", usage.ExtTraffic)
d.Set("gpu", usage.GPU)
d.Set("ram", usage.RAM)
d.Set("seps", flattenRGSeps(usage.SEPs))
@@ -173,7 +171,6 @@ func flattenResource(resource rg.Reservation) []map[string]interface{} {
"disk_size": resource.DiskSize,
"disk_size_max": resource.DiskSizeMax,
"extips": resource.ExtIPs,
"exttraffic": resource.ExtTraffic,
"gpu": resource.GPU,
"ram": resource.RAM,
"seps": flattenRGSeps(resource.SEPs),

Some files were not shown because too many files have changed in this diff Show More