This commit is contained in:
2026-02-11 13:02:14 +03:00
parent 069d63a65c
commit b8283ebfaf
277 changed files with 2184 additions and 4192 deletions

View File

@@ -120,7 +120,6 @@ func flattenAccLimits(l account.Limits) []map[string]interface{} {
"disksize": l.DiskSize,
"disksizemax": l.DiskSizeMax,
"extips": l.ExtIPs,
"exttraffic": l.ExtTraffic,
"gpu": l.GPU,
"ram": l.RAM,
"seps": l.SEPs,
@@ -136,7 +135,6 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
"disksize": r.DiskSize,
"disksizemax": r.DiskSizeMax,
"extips": r.ExtIPs,
"exttraffic": r.ExtTraffic,
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
@@ -190,7 +188,6 @@ func flattenRgResourceLimits(rl account.ResourceLimits) []map[string]interface{}
"cu_dm": rl.CuDM,
"cu_i": rl.CuI,
"cu_m": rl.CuM,
"cu_np": rl.CuNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenSTPolicy(rl.StoragePolicies),
}

View File

@@ -126,14 +126,6 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
if resLimitsConv["gpu_units"] != nil {
gpuUnits := int64(resLimitsConv["gpu_units"].(float64))
if gpuUnits == 0 {

View File

@@ -138,11 +138,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Optional: true,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Optional: true,
@@ -665,10 +660,6 @@ func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1024,10 +1015,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1128,10 +1115,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1236,10 +1219,6 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1427,10 +1406,6 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1548,10 +1523,6 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1652,10 +1623,6 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1842,10 +1809,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1902,10 +1865,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1942,10 +1901,6 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -2331,10 +2286,6 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -271,14 +271,6 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {

View File

@@ -50,11 +50,13 @@ func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
d.Set("responsetime", au.ResponseTime)
d.Set("result", au.Result)
d.Set("status_code", au.StatusCode)
d.Set("tags", au.Tags)
d.Set("timestamp", au.Timestamp)
d.Set("timestamp_end", au.TimestampEnd)
d.Set("ttl", au.TTL)
d.Set("user", au.User)
d.Set("resgroup_id", au.ResgroupID)
d.Set("account_id", au.AccountID)
d.Set("compute_id", au.ComputeID)
}
func flattenAuditList(au *audit.ListAudits) []map[string]interface{} {

View File

@@ -46,11 +46,6 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
//TODO
//"tags": {
// Type: schema.TypeString,
// Computed: true,
//},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
@@ -67,6 +62,18 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"resgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
}
}

View File

@@ -52,7 +52,7 @@ func utilityAuditCheckPresence(ctx context.Context, d *schema.ResourceData, m in
req.AuditGuid = d.Get("audit_guid").(string)
}
log.Debugf("utilityStackCheckPresence: load stack")
log.Debugf("utilityAuditCheckPresence: load audit")
auditInfo, err := c.CloudBroker().Audit().Get(ctx, req)
if err != nil {
return nil, err

View File

@@ -13,6 +13,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("account_id", disk.AccountID)
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("blk_discard", disk.BLKDiscard)
d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_by", disk.CreatedBy)
@@ -53,6 +54,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("sep_id", disk.SEPID)
d.Set("sep_type", disk.SEPType)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("size_available", disk.SizeAvailable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
@@ -108,6 +110,7 @@ func flattenDiskReplica(d *schema.ResourceData, disk *disks.RecordDisk, statusRe
d.Set("sep_id", disk.SEPID)
d.Set("sep_type", disk.SEPType)
d.Set("shareable", disk.Shareable)
d.Set("cache", disk.Cache)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
@@ -183,6 +186,7 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"blk_discard": disk.BLKDiscard,
"boot_partition": disk.BootPartition,
"computes": flattenDiskComputes(disk.Computes),
"created_by": disk.CreatedBy,
@@ -221,6 +225,8 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"role": disk.Role,
"sep_id": disk.SEPID,
"sep_type": disk.SEPType,
"shareable": disk.Shareable,
"cache": disk.Cache,
"size_available": disk.SizeAvailable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
@@ -293,11 +299,11 @@ func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]inter
for _, unattachedDisk := range ul.Data {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.CKey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"blk_discard": unattachedDisk.BLKDiscard,
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
@@ -330,6 +336,7 @@ func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]inter
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SEPID,
"shareable": unattachedDisk.Shareable,
"cache": unattachedDisk.Cache,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),

View File

@@ -75,6 +75,14 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
req.Pool = pool.(string)
}
if cache, ok := d.GetOk("cache"); ok {
req.Cache = cache.(string)
}
if blkDiscard, ok := d.GetOk("blk_discard"); ok {
req.BLKDiscard = blkDiscard.(bool)
}
diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil {
d.SetId("")
@@ -236,6 +244,18 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
}
if d.HasChange("cache") {
if err := resourceDiskChangeCache(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("blk_discard") {
if err := resourceDiskChangeBLKDiscard(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("node_ids") {
log.Debugf("resourceDiskUpdate: present for disk %d", d.Get("disk_id"))
if err := resourceDiskChangeNodes(ctx, d, m, false); err != nil {
@@ -362,6 +382,26 @@ func resourceDiskChangeStoragePolicyID(ctx context.Context, d *schema.ResourceDa
return err
}
func resourceDiskChangeCache(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Update(ctx, disks.UpdateRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Cache: d.Get("cache").(string),
})
return err
}
func resourceDiskChangeBLKDiscard(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Update(ctx, disks.UpdateRequest{
DiskID: uint64(d.Get("disk_id").(int)),
BLKDiscard: d.Get("blk_discard").(bool),
})
return err
}
func resourceDiskChangeNodes(ctx context.Context, d *schema.ResourceData, m interface{}, afterCreate bool) error {
c := m.(*controller.ControllerCfg)
diskID := uint64(d.Get("disk_id").(int))

View File

@@ -22,6 +22,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -281,6 +285,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -456,6 +464,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -719,6 +731,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -880,6 +896,10 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -1143,6 +1163,10 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_available": {
Type: schema.TypeFloat,
Computed: true,
@@ -1400,11 +1424,6 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
@@ -1427,6 +1446,10 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -1659,6 +1682,10 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -1964,12 +1991,23 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
Description: "Cache mode for the disk",
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
@@ -2569,6 +2607,10 @@ func dataSourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -2939,6 +2981,10 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -332,15 +332,15 @@ func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *control
}
func handleMigrateUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
stackId := uint64(d.Get("migrate").(int))
nodeId := uint64(d.Get("migrate").(int))
if err := ic.ExistStack(ctx, stackId, c); err != nil {
if err := ic.ExistNode(ctx, nodeId, c); err != nil {
return err
}
req := extnet.DeviceMigrateRequest{
NetID: recNet.ID,
StackID: stackId,
NetID: recNet.ID,
NodeID: nodeId,
}
_, err := c.CloudBroker().ExtNet().DeviceMigrate(ctx, req)

View File

@@ -63,8 +63,6 @@ func flattenFlipgroup(d *schema.ResourceData, flip *flipgroup.RecordFLIPGroup) {
d.Set("net_id", flip.NetID)
d.Set("net_type", flip.NetType)
d.Set("network", flip.Network)
d.Set("rg_id", flip.RGID)
d.Set("rg_name", flip.RGName)
d.Set("status", flip.Status)
d.Set("updated_by", flip.UpdatedBy)
d.Set("updated_time", flip.UpdatedTime)

View File

@@ -123,16 +123,6 @@ func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -179,11 +169,6 @@ func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "by_ip",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "rg_id",
},
"by_id": {
Type: schema.TypeInt,
Optional: true,
@@ -464,16 +449,6 @@ func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,

View File

@@ -63,9 +63,6 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
if byIp, ok := d.GetOk("by_ip"); ok {
req.ByIP = byIp.(string)
}
if rgId, ok := d.GetOk("rg_id"); ok {
req.RGID = uint64(rgId.(int))
}
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}

View File

@@ -73,7 +73,6 @@ func flattenGridRecordResource(rr grid.RecordResource) []map[string]interface{}
"disk_size": rr.DiskSize,
"disk_size_max": rr.DiskSizeMax,
"ext_ips": rr.ExtIPs,
"ext_traffic": rr.ExtTraffic,
"gpu": rr.GPU,
"ram": rr.RAM,
"seps": flattenGridSeps(rr.SEPs),

View File

@@ -124,10 +124,6 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -184,10 +180,6 @@ func dataSourceGridListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -338,10 +330,6 @@ func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -398,10 +386,6 @@ func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -469,10 +453,6 @@ func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -529,10 +509,6 @@ func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -15,8 +15,8 @@ import (
cb_k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
cb_lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
cb_node "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node"
cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
cb_trunk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/trunk"
cb_vfpool "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool"
cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
@@ -467,53 +467,23 @@ func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) erro
return fmt.Errorf("GID with id %v not found", gid)
}
func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg) error {
req := cb_stack.ListRequest{
ByID: stackId,
func ExistNode(ctx context.Context, nodeId uint64, c *controller.ControllerCfg) error {
req := cb_node.ListRequest{
ByID: nodeId,
}
stackList, err := c.CloudBroker().Stack().List(ctx, req)
nodeList, err := c.CloudBroker().Node().List(ctx, req)
if err != nil {
return err
}
if len(stackList.Data) == 0 {
return fmt.Errorf("stack with id %v not found", stackList)
if len(nodeList.Data) == 0 {
return fmt.Errorf("node with id %v not found", nodeList)
}
return nil
}
// ExistStackInPcidevice checks if compute exists with specified stackId and specified non-deleted rgId.
func ExistStackInPcidevice(ctx context.Context, stackId, rgId uint64, c *controller.ControllerCfg) error {
req := cb_rg.ListRequest{
ByID: rgId,
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return err
}
for _, v := range rgList.Data {
for _, idVM := range v.VMs {
req := cb_compute.GetRequest{
ComputeID: idVM,
}
computeRec, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return err
}
if computeRec.StackID == stackId {
return nil
}
}
}
return fmt.Errorf("no compute found with stack_id %v and rg_id %v", stackId, rgId)
}
func ExistLB(ctx context.Context, lbId uint64, c *controller.ControllerCfg) error {
req := cb_lb.ListRequest{

View File

@@ -1,69 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageListStacks(imageListStacks))
d.Set("entry_count", imageListStacks.EntryCount)
return nil
}
func DataSourceImageListStacks() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListStacksRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListStacksSchemaMake(),
}
}

View File

@@ -161,40 +161,6 @@ func flattenEco(m interface{}) string {
return output
}
func flattenImageListStacks(stack *image.ListStacks) []map[string]interface{} {
log.Debug("flattenImageListStacks")
temp := make([]map[string]interface{}, 0, len(stack.Data))
for _, item := range stack.Data {
t := map[string]interface{}{
"ckey": item.CKey,
//"meta": flattens.FlattenMeta(item.Meta),
"api_url": item.APIURL,
"api_key": item.APIKey,
"app_id": item.AppID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"desc": item.Description,
"descr": item.Descr,
"drivers": item.Drivers,
"eco": flattenEco(item.Eco),
"error": item.Error,
"gid": item.GID,
"guid": item.GUID,
"id": item.ID,
"images": item.Images,
"login": item.Login,
"mem_allocation_ratio": item.MemAllocationRatio,
"name": item.Name,
"packages": flattenPackages(item.Packages),
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
}
temp = append(temp, t)
}
return temp
}
func flattenPackages(pg image.Packages) []map[string]interface{} {
log.Debug("flattenPackages")
res := make([]map[string]interface{}, 0)

View File

@@ -36,291 +36,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func dataSourceImageListStacksSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"image_id": {
Type: schema.TypeInt,
Required: true,
Description: "image id",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "find by name",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "find by status",
},
"type_image": {
Type: schema.TypeString,
Optional: true,
Description: "find by type",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"api_url": {
Type: schema.TypeString,
Computed: true,
},
"api_key": {
Type: schema.TypeString,
Computed: true,
},
"app_id": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"descr": {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"eco": {
Type: schema.TypeString,
Computed: true,
},
"error": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"libvirt_bin": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_bin",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"libvirt_daemon": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_daemon",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"lvm2_lockd": {
Type: schema.TypeList,
Computed: true,
Description: "lvm2_lockd",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_common": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_common",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_switch": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_switch",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"qemu_system_x86": {
Type: schema.TypeList,
Computed: true,
Description: "qemu_system_x86",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"sanlock": {
Type: schema.TypeList,
Computed: true,
Description: "sanlock",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
},
},
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
Description: "items of stacks list",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
}
func dataSourceImageListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"sep_id": {
@@ -1017,10 +732,6 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "unc path",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,
@@ -1531,10 +1242,6 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "unc path",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,

View File

@@ -1,76 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) {
c := m.(*controller.ControllerCfg)
req := image.ListStacksRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if typeImage, ok := d.GetOk("type_image"); ok {
req.Type = typeImage.(string)
}
log.Debugf("utilityImageListStacksCheckPresence: load image list")
imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req)
if err != nil {
return nil, err
}
return imageListStacks, nil
}

View File

@@ -1466,12 +1466,14 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"lb_sysctl_params": {
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"lb_sysctl_params": {
Type: schema.TypeList,
Optional: true,
Description: "Custom sysctl values for Load Balancer instance. Applied on boot.",
@@ -1808,17 +1810,19 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"Q35", "i440fx"}, false),
Default: "Q35",
Description: "Type of the emulated system. Possible values: i440fx, Q35. Default: Q35",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
"labels": {
Type: schema.TypeList,
Computed: true,

View File

@@ -78,7 +78,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("numa_node_id", computeRec.NumaNodeId)
d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.PinnedToStack)
d.Set("pinned", computeRec.PinnedToNode)
d.Set("preferred_cpu", computeRec.PreferredCPU)
d.Set("ram", computeRec.RAM)
d.Set("reference_id", computeRec.ReferenceID)
@@ -88,9 +88,9 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("rg_name", computeRec.RGName)
d.Set("rg_id", computeRec.RGID)
d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
d.Set("stack_id", computeRec.StackID)
d.Set("stack_name", computeRec.StackName)
d.Set("started", computeRec.TechStatus == "STARTED")
d.Set("node_id", computeRec.NodeID)
d.Set("node_name", computeRec.NodeName)
d.Set("stateless_sep_id", computeRec.StatelessSEPID)
d.Set("stateless_sep_type", computeRec.StatelessSEPType)
d.Set("status", computeRec.Status)
@@ -100,11 +100,9 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", computeRec.UserManaged)
d.Set("read_only", computeRec.ReadOnly)
d.Set("vnc_password", computeRec.VNCPassword)
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
//TODO
// d.Set("virtual_image_id", computeRec.VirtualImageID)
// d.Set("virtual_image_name", computeRec.VirtualImageName)
d.Set("pci_devices", flattenPCI(*pciList))
d.Set("loader_type", computeRec.LoaderType)
d.Set("boot_type", computeRec.BootType)
@@ -286,6 +284,8 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
"delete_by": disk.DeletedBy,
"delete_time": disk.DeletedTime,
"update_time": disk.UpdatedTime,
"cache": disk.Cache,
"blk_discard": disk.BLKDiscard,
}
res = append(res, temp)
indexDataDisks++
@@ -327,79 +327,78 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"boot_image_id": computeItem.BootImageID,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"live_migration_job_id": computeItem.LiveMigrationJobID,
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"nid": computeItem.NID,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToStack,
"preferred_cpu": computeItem.PreferredCPU,
"qemu_guest": flattenQemuQuest(computeItem.QemuQuest),
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stack_name": computeItem.StackName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
//TODO
// "virtual_image_id": computeItem.VirtualImageID,
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"boot_image_id": computeItem.BootImageID,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"live_migration_job_id": computeItem.LiveMigrationJobID,
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"nid": computeItem.NID,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToNode,
"preferred_cpu": computeItem.PreferredCPU,
"qemu_guest": flattenQemuQuest(computeItem.QemuQuest),
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"node_id": computeItem.NodeID,
"node_name": computeItem.NodeName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"read_only": computeItem.ReadOnly,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"loader_type": computeItem.LoaderType,
"boot_type": computeItem.BootType,
"hot_resize": computeItem.HotResize,
@@ -419,78 +418,74 @@ func flattenDeletedComputeList(computes *compute.ListDeletedComputes) []map[stri
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"boot_image_id": computeItem.BootImageID,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
//TODO
// "image_id": computeItem.ImageID,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToStack,
"preferred_cpu": computeItem.PreferredCPU,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stack_name": computeItem.StackName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
//TODO
// "virtual_image_id": computeItem.VirtualImageID,
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"auto_start_w_node": computeItem.AutoStart,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"boot_image_id": computeItem.BootImageID,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"hp_backed": computeItem.HPBacked,
"compute_id": computeItem.ID,
"cpu_pin": computeItem.CPUPin,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"numa_affinity": computeItem.NumaAffinity,
"numa_node_id": computeItem.NumaNodeId,
"os_users": flattenOSUsers(computeItem.OSUsers),
"os_version": computeItem.OSVersion,
"pinned": computeItem.PinnedToNode,
"preferred_cpu": computeItem.PreferredCPU,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"reserved_node_cpus": computeItem.ReservedNodeCpus,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"node_id": computeItem.NodeID,
"node_name": computeItem.NodeName,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"loader_type": computeItem.LoaderType,
"boot_type": computeItem.BootType,
"hot_resize": computeItem.HotResize,
@@ -574,6 +569,7 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
// "bus_number": disk.BusNumber,
"disk_id": disk.ID,
// "pci_slot": disk.PCISlot,
"sep_id": disk.SepID,
}
res = append(res, temp)
}
@@ -690,7 +686,7 @@ func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface
"device_id": dev.ID,
"name": dev.Name,
"rg_id": dev.RGID,
"stack_id": dev.StackID,
"node_id": dev.NodeID,
"status": dev.Status,
"system_name": dev.SystemName,
}
@@ -809,7 +805,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("numa_affinity", compFacts.NumaAffinity)
d.Set("numa_node_id", compFacts.NumaNodeId)
d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
d.Set("pinned", compFacts.PinnedToStack)
d.Set("pinned", compFacts.PinnedToNode)
d.Set("preferred_cpu", compFacts.PreferredCPU)
d.Set("qemu_guest", flattenQemuQuest(compFacts.QemuQuest))
d.Set("ram", compFacts.RAM)
@@ -820,8 +816,8 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("rg_id", compFacts.RGID)
d.Set("rg_name", compFacts.RGName)
d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
d.Set("stack_id", compFacts.StackID)
d.Set("stack_name", compFacts.StackName)
d.Set("node_id", compFacts.NodeID)
d.Set("node_name", compFacts.NodeName)
d.Set("stateless_sep_id", compFacts.StatelessSEPID)
d.Set("stateless_sep_type", compFacts.StatelessSEPType)
d.Set("status", compFacts.Status)
@@ -831,11 +827,9 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("updated_time", compFacts.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", compFacts.UserManaged)
d.Set("read_only", compFacts.ReadOnly)
d.Set("vnc_password", compFacts.VNCPassword)
d.Set("vgpus", flattenVGPUs(compFacts.VGPUs))
//TODO
// d.Set("virtual_image_id", compFacts.VirtualImageID)
// d.Set("virtual_image_name", compFacts.VirtualImageName)
d.Set("pci_devices", flattenPCI(*pciList))
d.Set("loader_type", compFacts.LoaderType)
d.Set("boot_type", compFacts.BootType)
@@ -872,6 +866,7 @@ func parseComputeInterfacesToNetworks(networks []interface{}, ifaces compute.Lis
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
elem["mtu"] = value.MTU
elem["net_mask"] = value.NetMask
elem["sdn_interface_id"] = value.SDNInterfaceID
elem["weight"] = flattenNetworkWeight(networks, value.NetID, value.NetType)
elem["enabled"] = value.Enabled
@@ -900,6 +895,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"ckey": disk.CKey,
"meta": flattens.FlattenMeta(disk.Meta),
"account_id": disk.AccountID,
"blk_discard": disk.BLKDiscard,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,

View File

@@ -73,8 +73,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.Pool = pool.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
createReqX86.StackID = uint64(stackID.(int))
if nodeID, ok := d.GetOk("node_id"); ok {
createReqX86.NodeID = uint64(nodeID.(int))
}
if ipaType, ok := d.GetOk("ipa_type"); ok {
@@ -85,6 +85,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.BootDisk = uint64(bootSize.(int))
}
if bootDiskCache, ok := d.GetOk("boot_disk_cache"); ok {
createReqX86.BootDiskCache = bootDiskCache.(string)
}
if IS, ok := d.GetOk("is"); ok {
createReqX86.IS = IS.(string)
}
@@ -93,6 +97,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.ZoneID = uint64(zoneID.(int))
}
if bootDiskBLKDiscard, ok := d.GetOk("boot_disk_blk_discard"); ok {
createReqX86.BootDiskBLKDiscard = bootDiskBLKDiscard.(bool)
}
createReqX86.Interfaces = make([]kvmx86.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
@@ -126,6 +134,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
reqInterface.MTU = uint64(netInterfaceVal["mtu"].(int))
}
if reqInterface.NetType == "DPDK" || reqInterface.NetType == "VFNIC" {
if netMask, netMaskSet := netInterfaceVal["net_mask"]; netMaskSet {
reqInterface.NetMask = uint64(netMask.(int))
}
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
@@ -170,6 +184,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if imageID, ok := diskVal["image_id"]; ok {
reqDataDisk.ImageID = uint64(imageID.(int))
}
if cache, ok := diskVal["cache"]; ok {
reqDataDisk.Cache = cache.(string)
}
if blkDiscard, ok := diskVal["blk_discard"]; ok {
reqDataDisk.BLKDiscard = blkDiscard.(bool)
}
disksX86 = append(disksX86, reqDataDisk)
}
@@ -328,6 +348,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
if !cleanup {
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
req := compute.EnableRequest{ComputeID: computeId}
@@ -424,6 +445,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if start, ok := d.GetOk("started"); ok && start.(bool) {
req := compute.StartRequest{ComputeID: computeId}
if nodeID, ok := d.GetOk("node_id"); ok {
req.NodeID = uint64(nodeID.(int))
}
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
warnings.Add(err)
@@ -436,19 +460,16 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) {
if pin, ok := d.GetOk("pin_to_node"); ok && pin.(bool) {
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
_, nodeOk := d.GetOk("node_id")
if !start.(bool) && !stackOk {
warnings.Add(errors.New("cannot pin to stack a VM, that is not started and stack_id is not set"))
if !start.(bool) && !nodeOk {
warnings.Add(errors.New("cannot pin to node a VM, that is not started and node_id is not set"))
} else {
req := compute.PinToStackRequest{
ComputeID: computeId,
}
if stackID, ok := d.Get("stack_id").(int); ok {
req.TargetStackID = uint64(stackID)
req := compute.PinToNodeRequest{
ComputeID: computeId,
TargetNodeID: uint64(d.Get("node_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
@@ -459,7 +480,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
_, err := c.CloudBroker().Compute().PinToNode(ctx, req)
if err != nil {
warnings.Add(err)
}
@@ -628,7 +649,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if !d.Get("pin_to_stack").(bool) && d.Get("auto_start_w_node").(bool) {
if !d.Get("pin_to_node").(bool) && d.Get("auto_start_w_node").(bool) {
req := compute.UpdateRequest{
ComputeID: computeId,
AutoStart: d.Get("auto_start_w_node").(bool),
@@ -658,6 +679,14 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if readOnly, ok := d.GetOk("read_only"); ok {
if readOnly.(bool) {
if err := utilityComputeUpdateReadOnly(ctx, d, m); err != nil {
warnings.Add(err)
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -805,6 +834,18 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("boot_disk_cache") {
if err := utilityComputeUpdateBootDiskCache(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("boot_disk_blk_discard") {
if err := utilityComputeUpdateBootDiskBLKDiscard(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("extra_disks") {
err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
if err != nil {
@@ -812,8 +853,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("pin_to_stack") {
if err := utilityComputePinToStack(ctx, d, m); err != nil {
if d.HasChange("pin_to_node") {
if err := utilityComputePinToNode(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
@@ -946,6 +987,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("read_only") {
if err := utilityComputeUpdateReadOnly(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(resourceComputeRead(ctx, d, m), warnings.Get()...)
}
@@ -980,12 +1027,12 @@ func ResourceCompute() *schema.Resource {
SchemaVersion: 2,
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "libvirt_settings", "network", "affinity_rules", "anti_affinity_rules",
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_node", "auto_start_w_node", "libvirt_settings", "network", "affinity_rules", "anti_affinity_rules",
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu", "security_groups") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}
if diff.HasChanges("pin_to_stack") {
if diff.HasChanges("pin_to_node") {
diff.SetNewComputed("pinned")
}
if diff.HasChanges("started") {

View File

@@ -228,6 +228,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"blk_discard": {
Type: schema.TypeBool,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
@@ -236,6 +240,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"cache": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -939,11 +947,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
},
@@ -1011,6 +1019,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is locked to read-only operations.",
},
"vnc_password": {
Type: schema.TypeString,
Computed: true,
@@ -1100,14 +1113,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_name": {
Type: schema.TypeString,
Computed: true,
},
//extra parameters
"boot_disk_size": {
Type: schema.TypeInt,
@@ -1181,7 +1186,7 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by tech status",
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by node name.",
@@ -1196,10 +1201,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by IP address",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by stack ID",
Description: "Find by node ID",
},
"cd_image_id": {
Type: schema.TypeInt,
@@ -1460,6 +1465,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
@@ -1483,10 +1492,6 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
@@ -1829,11 +1834,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Find by node name.",
@@ -1890,6 +1895,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Computed: true,
Description: "Shows if compute is in read-only mode.",
},
"vgpus": {
Type: schema.TypeList,
Computed: true,
@@ -1901,10 +1911,6 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"loader_type": {
Type: schema.TypeString,
Computed: true,
@@ -2238,10 +2244,6 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
@@ -2545,11 +2547,11 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Find by node name.",
@@ -2617,10 +2619,6 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"loader_type": {
Type: schema.TypeString,
Computed: true,
@@ -3180,7 +3178,7 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -3406,12 +3404,24 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "If True, the imageId, bootDisk, sepId, pool parameters are ignored and the compute is created without a boot disk in the stopped state.",
},
"boot_disk_blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"boot_disk_size": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
},
"boot_disk_cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
ValidateFunc: validation.StringInSlice([]string{"none", "writethrough"}, false),
Description: "Setting the boot disk caching mode",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
@@ -3447,11 +3457,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "ID of CD-ROM live image to boot",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "ID of stack to start compute",
Description: "ID of node to start compute",
},
"is": {
Type: schema.TypeString,
@@ -3521,16 +3531,22 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "unique_identifier of LogicalPort on SDN side",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"net_mask": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Subnet mask, used only for DPDK and VFNIC network types",
},
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"libvirt_settings": {
Type: schema.TypeSet,
@@ -3654,7 +3670,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -3695,7 +3711,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -3704,8 +3720,16 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
"disks": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cache": {
Type: schema.TypeString,
Optional: true,
Default: "none",
ValidateFunc: validation.StringInSlice([]string{"none", "writethrough"}, false),
Description: "Setting the disk caching mode",
},
"disk_name": {
Type: schema.TypeString,
Required: true,
@@ -3721,6 +3745,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Storage policy id of disk. The rules of the specified storage policy will be used.",
},
"blk_discard": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
@@ -3927,7 +3956,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"pin_to_stack": {
"pin_to_node": {
Type: schema.TypeBool,
Optional: true,
Default: false,
@@ -4487,10 +4516,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"stack_name": {
"node_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the stack, on which VM started",
Description: "Name of the node, on which VM started",
},
"stateless_sep_id": {
Type: schema.TypeInt,
@@ -4520,6 +4549,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"read_only": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Sets read-only mode for this compute. Only data operations allowed when enabled.",
},
"vnc_password": {
Type: schema.TypeString,
Computed: true,
@@ -4609,10 +4644,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"virtual_image_id": {
Type: schema.TypeInt,
Computed: true,
},
"virtual_image_name": {
Type: schema.TypeString,
Computed: true,

View File

@@ -35,14 +35,17 @@ import (
"context"
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cty/cty"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -88,8 +91,8 @@ func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interf
if altBootId, ok := d.Get("alt_boot_id").(int); ok {
req.AltBootID = uint64(altBootId)
}
if stackId, ok := d.Get("stack_id").(int); ok {
req.StackID = uint64(stackId)
if nodeId, ok := d.Get("node_id").(int); ok {
req.NodeID = uint64(nodeId)
}
if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
return err
@@ -108,6 +111,28 @@ func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interf
return nil
}
// utilityComputeUpdateReadOnly pushes the current value of the "read_only"
// schema attribute to the platform for the compute identified by d.Id().
// Returns an error when the resource ID is not a valid uint64 or the API
// call fails.
func utilityComputeUpdateReadOnly(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	cfg := m.(*controller.ControllerCfg)

	id, parseErr := strconv.ParseUint(d.Id(), 10, 64)
	if parseErr != nil {
		return parseErr
	}

	readOnly := d.Get("read_only").(bool)
	changeReq := compute.ChangeReadOnlyRequest{
		ComputeID: id,
		ReadOnly:  readOnly,
	}

	if _, apiErr := cfg.CloudBroker().Compute().ChangeReadOnly(ctx, changeReq); apiErr != nil {
		return apiErr
	}

	log.Debugf("resourceCompute: read_only=%t for Compute ID %d", readOnly, id)
	return nil
}
func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
@@ -235,6 +260,9 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
changeStoragePolicyDisks := make([]interface{}, 0)
cacheUpdatedDisks := make([]interface{}, 0)
blkDiscardUpdatedDisks := make([]interface{}, 0)
migratedDisks := make([]interface{}, 0)
presentNewDisks := make([]interface{}, 0)
presentOldDisks := make([]interface{}, 0)
@@ -280,9 +308,18 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if isRenameDisk(oldConv, el) {
renamedDisks = append(renamedDisks, el)
}
if isChangeStoragePolicy(oldConv, el) {
if isMigrateDisk(oldConv, el) {
migratedDisks = append(migratedDisks, el)
} else if isChangeStoragePolicy(oldConv, el) {
changeStoragePolicyDisks = append(changeStoragePolicyDisks, el)
}
if isChangeCacheDisk(oldConv, el) {
cacheUpdatedDisks = append(cacheUpdatedDisks, el)
}
if isChangeBLKDiscardDisk(oldConv, el) {
blkDiscardUpdatedDisks = append(blkDiscardUpdatedDisks, el)
}
}
if len(deletedDisks) > 0 {
@@ -332,6 +369,9 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
if cacheVal, ok := diskConv["cache"].(string); ok {
req.Cache = cacheVal
}
diskID, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
if err != nil {
return err
@@ -409,6 +449,58 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
if len(cacheUpdatedDisks) > 0 {
for _, disk := range cacheUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
req := disks.UpdateRequest{
DiskID: diskID,
Cache: diskConv["cache"].(string),
}
_, err := c.CloudBroker().Disks().Update(ctx, req)
if err != nil {
return err
}
}
}
if len(blkDiscardUpdatedDisks) > 0 {
for _, disk := range blkDiscardUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
req := disks.UpdateRequest{
DiskID: diskID,
BLKDiscard: diskConv["blk_discard"].(bool),
}
_, err := c.CloudBroker().Disks().Update(ctx, req)
if err != nil {
return err
}
}
}
if len(migratedDisks) > 0 {
if err := utilityComputeMigrateDisks(ctx, d, m, migratedDisks, oldConv); err != nil {
return err
}
}
for i := range presentNewDisks {
newDisk := presentNewDisks[i].(map[string]interface{})
oldDisk := presentOldDisks[i].(map[string]interface{})
@@ -449,6 +541,56 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
return nil
}
// utilityComputeUpdateBootDiskCache applies the "boot_disk_cache" schema
// value to the compute's boot disk via the disks Update API. It fails when
// the boot disk ID is unknown (boot_disk_id unset or zero), because there is
// no disk to update in that case.
func utilityComputeUpdateBootDiskCache(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	cfg := m.(*controller.ControllerCfg)

	var diskID uint64
	if raw, ok := d.GetOk("boot_disk_id"); ok {
		if asInt, isInt := raw.(int); isInt {
			diskID = uint64(asInt)
		}
	}
	if diskID == 0 {
		return fmt.Errorf("cannot update boot_disk_cache: boot disk ID is unknown for compute %s", d.Id())
	}

	updateReq := disks.UpdateRequest{
		DiskID: diskID,
		Cache:  d.Get("boot_disk_cache").(string),
	}
	_, err := cfg.CloudBroker().Disks().Update(ctx, updateReq)
	return err
}
// utilityComputeUpdateBootDiskBLKDiscard applies the "boot_disk_blk_discard"
// schema value to the compute's boot disk via the disks Update API.
//
// Returns an error when the boot disk ID is unknown (boot_disk_id unset or
// zero), since there is no disk to update in that case, or when the API call
// fails.
func utilityComputeUpdateBootDiskBLKDiscard(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	newBLKDiscard := d.Get("boot_disk_blk_discard").(bool)
	var bootDiskID uint64
	if v, ok := d.GetOk("boot_disk_id"); ok {
		if id, ok := v.(int); ok {
			bootDiskID = uint64(id)
		}
	}
	if bootDiskID == 0 {
		// Fixed copy-paste from utilityComputeUpdateBootDiskCache: the message
		// previously named "boot_disk_cache" instead of this attribute.
		return fmt.Errorf("cannot update boot_disk_blk_discard: boot disk ID is unknown for compute %s", d.Id())
	}
	req := disks.UpdateRequest{
		DiskID:     bootDiskID,
		BLKDiscard: newBLKDiscard,
	}
	_, err := c.CloudBroker().Disks().Update(ctx, req)
	return err
}
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg)
@@ -709,6 +851,12 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
req.MTU = uint64(netData["mtu"].(int))
}
if req.NetType == "DPDK" || req.NetType == "VFNIC" {
if netMask, ok := netData["net_mask"].(int); ok && netMask > 0 {
req.NetMask = uint64(netMask)
}
}
if netData["sdn_interface_id"].(string) != "" {
req.SDNInterfaceID = netData["sdn_interface_id"].(string)
}
@@ -1613,33 +1761,33 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
return nil
}
func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error {
func utilityComputePinToNode(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
if !start.(bool) && !stackOk {
errors.New("cannot pin to stack a VM, that is not started and stack_id is not set")
}
oldPin, newPin := d.GetChange("pin_to_stack")
oldPin, newPin := d.GetChange("pin_to_node")
if oldPin.(bool) && !newPin.(bool) {
req := compute.UnpinFromStackRequest{
req := compute.UnpinFromNodeRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req)
_, err := c.CloudBroker().Compute().UnpinFromNode(ctx, req)
if err != nil {
return err
}
}
if !oldPin.(bool) && newPin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("stack_id").(int)),
start, _ := d.GetOk("started")
_, nodeOk := d.GetOk("node_id")
if !start.(bool) && !nodeOk {
return errors.New("cannot pin to node a VM, that is not started and node_id is not set")
}
req := compute.PinToNodeRequest{
ComputeID: computeId,
TargetNodeID: uint64(d.Get("node_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
@@ -1650,7 +1798,7 @@ func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m int
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
_, err := c.CloudBroker().Compute().PinToNode(ctx, req)
if err != nil {
return err
}
@@ -1905,6 +2053,30 @@ func isRenameDisk(els []interface{}, el interface{}) bool {
return false
}
// isChangeCacheDisk reports whether the disk el (new state) has the same
// disk_id as some disk in els (old state) but a different "cache" value.
func isChangeCacheDisk(els []interface{}, el interface{}) bool {
	newDisk := el.(map[string]interface{})
	newID := newDisk["disk_id"].(int)
	newCache := newDisk["cache"].(string)
	for _, old := range els {
		oldDisk := old.(map[string]interface{})
		if oldDisk["disk_id"].(int) == newID && oldDisk["cache"].(string) != newCache {
			return true
		}
	}
	return false
}
// isChangeBLKDiscardDisk reports whether the disk el (new state) matches a
// disk in els (old state) by disk_id but differs in its "blk_discard" flag.
func isChangeBLKDiscardDisk(els []interface{}, el interface{}) bool {
	newDisk := el.(map[string]interface{})
	newID := newDisk["disk_id"].(int)
	newDiscard := newDisk["blk_discard"].(bool)
	for _, old := range els {
		oldDisk := old.(map[string]interface{})
		if oldDisk["disk_id"].(int) == newID && oldDisk["blk_discard"].(bool) != newDiscard {
			return true
		}
	}
	return false
}
func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
@@ -1917,6 +2089,22 @@ func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
return false
}
// isMigrateDisk reports whether the disk el (new state) corresponds to an
// existing disk in els (old state, matched by a non-zero disk_id) whose
// sep_id or pool differs — i.e. whether the disk needs to be migrated.
func isMigrateDisk(els []interface{}, el interface{}) bool {
	newDisk := el.(map[string]interface{})
	newID := newDisk["disk_id"].(int)
	// disk_id == 0 means a not-yet-created disk; nothing to migrate.
	if newID == 0 {
		return false
	}
	for _, old := range els {
		oldDisk := old.(map[string]interface{})
		if oldDisk["disk_id"].(int) != newID {
			continue
		}
		if oldDisk["sep_id"].(int) != newDisk["sep_id"].(int) ||
			oldDisk["pool"].(string) != newDisk["pool"].(string) {
			return true
		}
	}
	return false
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
@@ -1948,6 +2136,105 @@ func isChangeNodesDisk(els []interface{}, elOld interface{}) (bool, interface{})
return false, nil
}
// utilityComputeMigrateDisks migrates every disk in migratedDisks to the
// sep_id/pool recorded in its new state, waiting for each migration task to
// complete before starting the next one. Disks with disk_id == 0 (not yet
// created) are skipped. d and oldDisks are currently unused but kept for
// signature compatibility with the other disk-update helpers.
func utilityComputeMigrateDisks(ctx context.Context, d *schema.ResourceData, m interface{}, migratedDisks, oldDisks []interface{}) error {
	cfg := m.(*controller.ControllerCfg)
	for _, raw := range migratedDisks {
		entry := raw.(map[string]interface{})
		srcID := uint64(entry["disk_id"].(int))
		if srcID == 0 {
			log.Debugf("utilityComputeMigrateDisks: skipping disk with id=0")
			continue
		}
		targetSEP := uint64(entry["sep_id"].(int))
		targetPool := entry["pool"].(string)
		policyID := uint64(entry["storage_policy_id"].(int))
		log.Debugf("utilityComputeMigrateDisks: migrating disk_id=%d to sep_id=%d, pool=%s", srcID, targetSEP, targetPool)

		taskID, err := cfg.CloudBroker().Disks().Migrate(ctx, disks.MigrateRequest{
			DiskID:          srcID,
			SEPID:           targetSEP,
			PoolName:        targetPool,
			StoragePolicyID: policyID,
		})
		if err != nil {
			return fmt.Errorf("failed to start disk migration for disk_id=%d: %w", srcID, err)
		}
		log.Debugf("utilityComputeMigrateDisks: disk migration started, taskID=%s", taskID)

		migratedID, waitErr := utilityComputeWaitForMigrationTask(ctx, cfg, taskID, srcID)
		if waitErr != nil {
			return fmt.Errorf("disk migration task failed for disk_id=%d: %w", srcID, waitErr)
		}
		log.Debugf("utilityComputeMigrateDisks: disk migration completed, old_disk_id=%d, new_disk_id=%d", srcID, migratedID)
	}
	return nil
}
// utilityComputeWaitForMigrationTask polls the platform every 15 seconds
// until the disk migration task identified by taskID completes, then parses
// the newly created disk ID out of the task result.
//
// The wait honors context cancellation: when ctx is cancelled (e.g. a
// Terraform operation timeout), the poll loop exits immediately with
// ctx.Err() instead of sleeping and looping forever, which the previous
// unconditional time.Sleep-based loop did.
//
// oldDiskID is currently unused but kept for signature stability.
func utilityComputeWaitForMigrationTask(ctx context.Context, c *controller.ControllerCfg, taskID string, oldDiskID uint64) (uint64, error) {
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-ticker.C:
		}
		taskReq := tasks.GetRequest{
			// strip surrounding quotes — the Migrate call presumably returns a
			// JSON-quoted ID; TODO confirm against the SDK response format
			AuditID: strings.Trim(taskID, `"`),
		}
		taskInfo, err := c.CloudBroker().Tasks().Get(ctx, taskReq)
		if err != nil {
			return 0, fmt.Errorf("failed to get task status: %w", err)
		}
		log.Debugf("utilityComputeWaitForMigrationTask: taskID=%s, completed=%t, status=%s", taskID, taskInfo.Completed, taskInfo.Status)
		if !taskInfo.Completed {
			continue
		}
		if taskInfo.Error != "" {
			return 0, fmt.Errorf("migration task failed with error: %s", taskInfo.Error)
		}
		resultStr, err := taskInfo.Result.ToString()
		if err != nil {
			return 0, fmt.Errorf("failed to get task result: %w", err)
		}
		log.Debugf("utilityComputeWaitForMigrationTask: migration result: %s", resultStr)
		newDiskID, err := extractNewDiskIDFromResult(resultStr)
		if err != nil {
			return 0, fmt.Errorf("failed to parse migration result: %w", err)
		}
		return newDiskID, nil
	}
}
// migratedDiskIDRe matches the human-readable migration result emitted by the
// platform and captures the ID of the newly created disk. Compiled once at
// package scope so repeated migrations do not recompile the pattern.
var migratedDiskIDRe = regexp.MustCompile(`Disk ID \d+ successfully migrated to Disk ID (\d+)`)

// extractNewDiskIDFromResult parses the new disk ID out of a migration task
// result string of the form
// "Disk ID <old> successfully migrated to Disk ID <new>".
// It returns an error when the string does not match the expected pattern or
// the captured ID does not fit in a uint64.
func extractNewDiskIDFromResult(result string) (uint64, error) {
	matches := migratedDiskIDRe.FindStringSubmatch(result)
	if len(matches) < 2 {
		return 0, fmt.Errorf("could not extract new disk ID from result: %s", result)
	}
	newDiskID, err := strconv.ParseUint(matches[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse new disk ID: %w", err)
	}
	return newDiskID, nil
}
func isContainsAR(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})

View File

@@ -69,11 +69,11 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
if ipAddress, ok := d.GetOk("ip_address"); ok {
req.IPAddress = ipAddress.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
req.StackID = stackID.(uint64)
if nodeID, ok := d.GetOk("node_id"); ok {
req.NodeID = nodeID.(uint64)
}
if stackName, ok := d.GetOk("stack_name"); ok {
req.StackName = stackName.(string)
if nodeName, ok := d.GetOk("node_name"); ok {
req.NodeName = nodeName.(string)
}
if cdImageID, ok := d.GetOk("cd_image_id"); ok {
req.CDImageID = cdImageID.(uint64)

View File

@@ -1301,6 +1301,7 @@ func lbResourceSchemaMake() map[string]*schema.Schema {
"start": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"desc": {
Type: schema.TypeString,

View File

@@ -43,9 +43,13 @@ import (
func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
log.Debugf("flattenNode: decoded node id %d", d.Get("node_id").(int))
d.Set("auto_start", item.AutoStart)
d.Set("auto_start_count", item.AutoStartCount)
d.Set("consumption", flattenConsumption(item.Consumption))
d.Set("cpu_info", flattenCpuInfo(item.CpuInfo))
d.Set("cpu_allocation_ratio", item.CPUAllocationRatio)
d.Set("mem_allocation_ratio", item.MemAllocationRatio)
d.Set("description", item.Description)
d.Set("dpdk", flattenDPDKItem(item.DPDK))
d.Set("gid", item.GID)
d.Set("ipaddr", item.IPAddr)
@@ -54,12 +58,14 @@ func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
d.Set("need_reboot", item.NeedReboot)
d.Set("net_addr", flattenGetNetAddr(item.NetAddr))
d.Set("network_mode", item.NetworkMode)
d.Set("openvswitch_bridges", item.OpenvSwitchBridges)
d.Set("nic_info", flattenNicInfo(item.NicInfo))
d.Set("numa_topology", flattenNumaTopology(item.NumaTopology))
d.Set("reserved_cpus", flattenNodeItem(item.ReservedCPUs))
d.Set("roles", item.Roles)
d.Set("sdn_hypervisor_name", item.SDNHypervisorName)
d.Set("sriov_enabled", item.SriovEnabled)
d.Set("stack_id", item.StackID)
d.Set("node_id", item.ID)
d.Set("status", item.Status)
d.Set("to_active", flattenRole(item.ToActive))
d.Set("to_installing", flattenRole(item.ToInstalling))
@@ -67,6 +73,7 @@ func flattenNode(d *schema.ResourceData, item *node.RecordNode) {
d.Set("to_restricted", flattenRole(item.ToRestricted))
d.Set("version", item.Version)
d.Set("zone_id", item.ZoneID)
d.Set("usable_cpus", item.UsableCPUs)
}
func flattenConsumption(info node.ConsumptionInfo) []map[string]interface{} {
@@ -115,48 +122,57 @@ func flattenNodeList(nodes *node.ListNodes) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(nodes.Data))
for _, item := range nodes.Data {
temp := map[string]interface{}{
"additional_pkgs": flattenNodeItem(item.AdditionalPkgs),
"cpu_info": flattenCpuInfo(item.CpuInfo),
"description": item.Description,
"dpdk": flattenDPDKItem(item.DPDK),
"gid": item.GID,
"guid": item.GUID,
"hostkey": item.HostKey,
"node_id": item.ID,
"ipaddr": item.IPAddr,
"isolated_cpus": flattenNodeItem(item.IsolatedCpus),
"lastcheck": item.LastCheck,
"machine_guid": item.MachineGUID,
"mainboard_sn": item.MainboardSN,
"memory": item.Memory,
"milestones": item.Milestones,
"model": item.Model,
"name": item.Name,
"need_reboot": item.NeedReboot,
"net_addr": flattenNetAddr(item.NetAddr),
"network_mode": item.NetworkMode,
"nic_info": flattenNicInfo(item.NicInfo),
"node_uuid": item.NodeUUID,
"numa_topology": flattenNumaTopology(item.NumaTopology),
"peer_backup": item.PeerBackup,
"peer_log": item.PeerLog,
"peer_stats": item.PeerStats,
"pgpus": item.Pgpus,
"public_keys": item.PublicKeys,
"release": item.Release,
"reserved_cpus": flattenNodeItem(item.ReservedCPUs),
"roles": item.Roles,
"sdn_hypervisor_name": item.SDNHypervisorName,
"seps": item.Seps,
"serial_num": item.SerialNum,
"sriov_enabled": item.SriovEnabled,
"stack_id": item.StackID,
"status": item.Status,
"tags": item.Tags,
"type": item.Type,
"uefi_firmware_file": item.UEFIFirmwareFile,
"version": item.Version,
"zone_id": item.ZoneID,
"additional_pkgs": flattenNodeItem(item.AdditionalPkgs),
"auto_start": item.AutoStart,
"auto_start_count": item.AutoStartCount,
"cpu_info": flattenCpuInfo(item.CpuInfo),
"description": item.Description,
"dpdk": flattenDPDKItem(item.DPDK),
"gid": item.GID,
"guid": item.GUID,
"hostkey": item.HostKey,
"node_id": item.ID,
"ipaddr": item.IPAddr,
"isolated_cpus": flattenNodeItem(item.IsolatedCpus),
"lastcheck": item.LastCheck,
"machine_guid": item.MachineGUID,
"mainboard_sn": item.MainboardSN,
"memory": item.Memory,
"milestones": item.Milestones,
"model": item.Model,
"name": item.Name,
"need_reboot": item.NeedReboot,
"net_addr": flattenNetAddr(item.NetAddr),
"network_mode": item.NetworkMode,
"nic_info": flattenNicInfo(item.NicInfo),
"node_uuid": item.NodeUUID,
"numa_topology": flattenNumaTopology(item.NumaTopology),
"peer_backup": item.PeerBackup,
"peer_log": item.PeerLog,
"peer_stats": item.PeerStats,
"pgpus": item.Pgpus,
"public_keys": item.PublicKeys,
"release": item.Release,
"reserved_cpus": flattenNodeItem(item.ReservedCPUs),
"roles": item.Roles,
"sdn_hypervisor_name": item.SDNHypervisorName,
"seps": item.Seps,
"serial_num": item.SerialNum,
"sriov_enabled": item.SriovEnabled,
"status": item.Status,
"tags": item.Tags,
"type": item.Type,
"uefi_firmware_file": item.UEFIFirmwareFile,
"usable_cpus": item.UsableCPUs,
"version": item.Version,
"zone_id": item.ZoneID,
"openvswitch_bridges": item.OpenvSwitchBridges,
"api_url": item.APIUrl,
"drivers": item.Drivers,
"old_compat_lvm_id": item.OldCompatLVMID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"mem_allocation_ratio": item.MemAllocationRatio,
"packages": flattenPackages(item.Packages),
}
res = append(res, temp)
}
@@ -250,6 +266,8 @@ func flattenCpuInfo(info node.CpuInfo) []map[string]interface{} {
"clock_speed": info.ClockSpeed,
"core_count": info.CoreCount,
"phys_count": info.PhysCount,
"flags": info.Flags,
"model_name": info.ModelName,
}
res[0] = temp
return res
@@ -274,6 +292,20 @@ func flattenNodeItem(m []interface{}) []string {
return output
}
// flattenPackages converts the node's package map into the list-of-maps
// shape the Terraform schema expects, exposing each package's version and
// installed size.
//
// NOTE(review): Go map iteration order is random, so the resulting list
// order varies between reads — confirm the schema treats this list as
// unordered.
func flattenPackages(pkgs map[string]node.PackageInfo) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(pkgs))
	for _, info := range pkgs {
		out = append(out, map[string]interface{}{
			"ver":  info.Ver,
			"size": info.InstalledSize,
		})
	}
	return out
}
func flattenDPDKItem(dpdk node.DPDK) []map[string]interface{} {
res := make([]map[string]interface{}, 1)
bridges := make([]map[string]interface{}, 1)

View File

@@ -9,6 +9,14 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "node id",
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"auto_start_count": {
Type: schema.TypeInt,
Computed: true,
},
"consumption": {
Type: schema.TypeList,
Computed: true,
@@ -102,11 +110,30 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"flags": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"model_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"cpu_allocation_ratio": {
Type: schema.TypeInt,
Type: schema.TypeFloat,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"dpdk": {
@@ -203,10 +230,24 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
},
},
},
"usable_cpus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"network_mode": {
Type: schema.TypeString,
Computed: true,
},
"openvswitch_bridges": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"nic_info": {
Type: schema.TypeList,
Computed: true,
@@ -317,12 +358,12 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"sriov_enabled": {
Type: schema.TypeBool,
"sdn_hypervisor_name": {
Type: schema.TypeString,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
"sriov_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
@@ -485,6 +526,22 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"api_url": {
Type: schema.TypeString,
Computed: true,
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"auto_start_count": {
Type: schema.TypeInt,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"cpu_info": {
Type: schema.TypeList,
Computed: true,
@@ -502,6 +559,17 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"flags": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"model_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
@@ -509,6 +577,13 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"dpdk": {
Type: schema.TypeList,
Computed: true,
@@ -600,6 +675,10 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"memory": {
Type: schema.TypeInt,
Computed: true,
@@ -662,6 +741,17 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"old_compat_lvm_id": {
Type: schema.TypeInt,
Computed: true,
},
"openvswitch_bridges": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"nic_info": {
Type: schema.TypeList,
Computed: true,
@@ -762,6 +852,22 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
},
},
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ver": {
Type: schema.TypeString,
Computed: true,
},
"size": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"peer_backup": {
Type: schema.TypeInt,
Computed: true,
@@ -825,10 +931,6 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -848,6 +950,13 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"usable_cpus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"version": {
Type: schema.TypeString,
Computed: true,

View File

@@ -4,14 +4,11 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenPcidevice(d *schema.ResourceData, pd *pcidevice.ItemPCIDevice) {
log.Debugf("flattenPCIDevice: ID %d", pd.ID)
d.Set("ckey", pd.CKey)
d.Set("meta", flattens.FlattenMeta(pd.Meta))
d.Set("compute_id", pd.ComputeID)
d.Set("description", pd.Description)
d.Set("guid", pd.GUID)
@@ -19,7 +16,7 @@ func flattenPcidevice(d *schema.ResourceData, pd *pcidevice.ItemPCIDevice) {
d.Set("device_id", pd.ID)
d.Set("name", pd.Name)
d.Set("rg_id", pd.RGID)
d.Set("stack_id", pd.StackID)
d.Set("node_id", pd.NodeID)
d.Set("status", pd.Status)
d.Set("system_name", pd.SystemName)
}
@@ -30,8 +27,6 @@ func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{}
res := make([]map[string]interface{}, 0, len(pl.Data))
for _, item := range pl.Data {
temp := map[string]interface{}{
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"compute_id": item.ComputeID,
"description": item.Description,
"guid": item.GUID,
@@ -39,7 +34,7 @@ func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{}
"device_id": item.ID,
"rg_id": item.RGID,
"name": item.Name,
"stack_id": item.StackID,
"node_id": item.NodeID,
"status": item.Status,
"system_name": item.SystemName,
}

View File

@@ -44,14 +44,14 @@ import (
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
stackId := uint64(d.Get("stack_id").(int))
nodeId := uint64(d.Get("node_id").(int))
rgId := uint64(d.Get("rg_id").(int))
if err := ic.ExistRG(ctx, rgId, c); err != nil {
errs = append(errs, err)
}
if err := ic.ExistStack(ctx, stackId, c); err != nil {
if err := ic.ExistNode(ctx, nodeId, c); err != nil {
errs = append(errs, err)
}

View File

@@ -56,7 +56,7 @@ func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m inte
return diags
}
createReq.StackID = uint64(d.Get("stack_id").(int))
createReq.NodeID = uint64(d.Get("node_id").(int))
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.Name = d.Get("name").(string)
createReq.HWPath = d.Get("hw_path").(string)

View File

@@ -11,17 +11,6 @@ func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
@@ -46,7 +35,7 @@ func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -109,17 +98,6 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
@@ -144,7 +122,7 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -170,11 +148,11 @@ func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
func resourcePcideviceSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "stackId",
Description: "Node ID",
},
"rg_id": {
Type: schema.TypeInt,

View File

@@ -70,7 +70,6 @@ func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} {
"cu_dm": rl.CUDM,
"cu_i": rl.CUI,
"cu_m": rl.CUM,
"cu_np": rl.CUNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenRgStoragePolicy(rl.StoragePolicies),
}
@@ -160,7 +159,6 @@ func flattenRgUsageResource(d *schema.ResourceData, usage rg.Reservation) {
d.Set("disk_size", usage.DiskSize)
d.Set("disk_size_max", usage.DiskSizeMax)
d.Set("extips", usage.ExtIPs)
d.Set("exttraffic", usage.ExtTraffic)
d.Set("gpu", usage.GPU)
d.Set("ram", usage.RAM)
d.Set("seps", flattenRGSeps(usage.SEPs))
@@ -173,7 +171,6 @@ func flattenResource(resource rg.Reservation) []map[string]interface{} {
"disk_size": resource.DiskSize,
"disk_size_max": resource.DiskSizeMax,
"extips": resource.ExtIPs,
"exttraffic": resource.ExtTraffic,
"gpu": resource.GPU,
"ram": resource.RAM,
"seps": flattenRGSeps(resource.SEPs),

View File

@@ -36,7 +36,6 @@ type ResourceLimits struct {
CUD float64 `json:"CU_D"`
CUI float64 `json:"CU_I"`
CUM float64 `json:"CU_M"`
CUNP float64 `json:"CU_NP"`
GpuUnits float64 `json:"gpu_units"`
}
@@ -124,21 +123,19 @@ type UserAclRecord struct {
}
type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get
Cpu float64 `json:"CU_C"` // CPU count in pcs
Ram float64 `json:"CU_M"` // RAM volume in MB, it is STILL reported as FLOAT
Disk float64 `json:"CU_D"` // Disk capacity in GB
ExtIPs float64 `json:"CU_I"` // Ext IPs count
ExtTraffic float64 `json:"CU_NP"` // Ext network traffic
GpuUnits float64 `json:"gpu_units"` // GPU count
Cpu float64 `json:"CU_C"` // CPU count in pcs
Ram float64 `json:"CU_M"` // RAM volume in MB, it is STILL reported as FLOAT
Disk float64 `json:"CU_D"` // Disk capacity in GB
ExtIPs float64 `json:"CU_I"` // Ext IPs count
GpuUnits float64 `json:"gpu_units"` // GPU count
}
type ResourceRecord struct { // this is how actual usage is reported by /api/.../rg/get
Cpu int `json:"cpu"`
Disk int `json:"disksize"`
ExtIPs int `json:"extips"`
ExtTraffic int `json:"exttraffic"`
Gpu int `json:"gpu"`
Ram int `json:"ram"`
Cpu int `json:"cpu"`
Disk int `json:"disksize"`
ExtIPs int `json:"extips"`
Gpu int `json:"gpu"`
Ram int `json:"ram"`
}
type UsageRecord struct {

View File

@@ -38,12 +38,11 @@ import (
func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
quota := QuotaRecord{
Cpu: -1,
Ram: -1., // this is float64, but may change in the future
Disk: -1,
ExtTraffic: -1,
ExtIPs: -1,
GpuUnits: -1,
Cpu: -1,
Ram: -1., // this is float64, but may change in the future
Disk: -1,
ExtIPs: -1,
GpuUnits: -1,
}
subres_data := arg_list[0].(map[string]interface{})
@@ -59,10 +58,6 @@ func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
quota.Ram = subres_data["ram"].(float64)
}
if subres_data["ext_traffic"].(int) > 0 {
quota.ExtTraffic = subres_data["ext_traffic"].(float64)
}
if subres_data["ext_ips"].(int) > 0 {
quota.ExtIPs = subres_data["ext_ips"].(float64)
}
@@ -80,7 +75,6 @@ func parseQuota(quota rg.ResourceLimits) []interface{} {
quota_map["cpu"] = quota.CUC
quota_map["ram"] = quota.CUM
quota_map["disk"] = quota.CuD
quota_map["ext_traffic"] = quota.CUNP
quota_map["ext_ips"] = quota.CUI
quota_map["gpu_units"] = quota.GPUUnits

View File

@@ -99,14 +99,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
}
if owner, ok := d.GetOk("owner"); ok {
@@ -423,14 +415,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
doGeneralUpdate = true
}

View File

@@ -144,10 +144,6 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -435,10 +431,6 @@ func dataSourceRGResourceConsumptionGetSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -495,10 +487,6 @@ func dataSourceRGResourceConsumptionGetSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -559,10 +547,6 @@ func dataSourceRGResourceConsumptionGetSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -607,10 +591,6 @@ func dataSourceRGResourceConsumptionListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -667,10 +647,6 @@ func dataSourceRGResourceConsumptionListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -731,10 +707,6 @@ func dataSourceRGResourceConsumptionListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -776,10 +748,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
@@ -1669,10 +1637,6 @@ func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -1967,10 +1931,6 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
@@ -2309,12 +2269,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "MaxMemoryCapacity",
},
"cu_np": {
Type: schema.TypeFloat,
Optional: true,
Computed: true,
Description: "MaxNetworkPeerTransfer",
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,

View File

@@ -69,20 +69,6 @@ func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{
req.Enable = enable.(bool)
}
var consumedNIDs []uint64
for _, item := range d.Get("consumed_by").(*schema.Set).List() {
consumedNIDs = append(consumedNIDs, uint64(item.(int)))
}
req.ConsumerNIDs = consumedNIDs
var providerNIDs []uint64
for _, item := range d.Get("provided_by").([]interface{}) {
providerNIDs = append(providerNIDs, uint64(item.(int)))
}
req.ProviderNIDs = providerNIDs
sepId, err := c.CloudBroker().SEP().Create(ctx, req)
if err != nil {
d.SetId("")
@@ -126,6 +112,22 @@ func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
}
if consumedBy, ok := d.GetOk("consumed_by"); ok && consumedBy.(*schema.Set).Len() > 0 {
log.Debugf("resourceSepCreate, consumed_by: consumed_by=%v sep_id %d after completing its resource configuration", consumedBy, sepId)
err := resourceSepAddConsumerNodes(ctx, d, m)
if err != nil {
warnings.Add(err)
}
}
if providedBy, ok := d.GetOk("provided_by"); ok && len(providedBy.([]interface{})) > 0 {
log.Debugf("resourceSepCreate, provided_by: provided_by=%v sep_id %d after completing its resource configuration", providedBy, sepId)
err := resourceSepAddProviderNodes(ctx, d, m)
if err != nil {
warnings.Add(err)
}
}
return append(resourceSepRead(ctx, d, m), warnings.Get()...)
}
@@ -483,6 +485,52 @@ func resourceSepUpdateProviders(ctx context.Context, d *schema.ResourceData, m i
return nil
}
func resourceSepAddConsumerNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceSepAddConsumerNodes: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int))
c := m.(*controller.ControllerCfg)
var consumerNIDs []uint64
for _, item := range d.Get("consumed_by").(*schema.Set).List() {
consumerNIDs = append(consumerNIDs, uint64(item.(int)))
}
if len(consumerNIDs) > 0 {
reqAdd := sep.AddConsumerNodesRequest{
SEPID: uint64(d.Get("sep_id").(int)),
ConsumerNIDs: consumerNIDs,
}
_, err := c.CloudBroker().SEP().AddConsumerNodes(ctx, reqAdd)
if err != nil {
return err
}
}
return nil
}
func resourceSepAddProviderNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceSepAddProviderNodes: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int))
c := m.(*controller.ControllerCfg)
var providerNIDs []uint64
for _, item := range d.Get("provided_by").([]interface{}) {
providerNIDs = append(providerNIDs, uint64(item.(int)))
}
if len(providerNIDs) > 0 {
reqAdd := sep.AddProviderNodesRequest{
SEPID: uint64(d.Get("sep_id").(int)),
ProviderNIDs: providerNIDs,
}
_, err := c.CloudBroker().SEP().AddProviderNodes(ctx, reqAdd)
if err != nil {
return err
}
}
return nil
}
func ResourceSep() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

View File

@@ -109,7 +109,7 @@ func dataSourceSepTemplateSchemaMake() map[string]*schema.Schema {
"sep_type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"hitachi", "dorado", "tatlin", "shared", "local", "des"}, false),
ValidateFunc: validation.StringInSlice([]string{"des", "hitachi", "dorado", "tatlin", "shared", "local", "ustor"}, false),
Description: "type of sep",
},
"lang": {
@@ -305,6 +305,14 @@ func dataSourceSepListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"sep_ids": {
Type: schema.TypeList,
Optional: true,
Description: "sort by list of SEP identifiers",
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -503,9 +511,10 @@ func resourceSepSchemaMake() map[string]*schema.Schema {
Description: "SEP name",
},
"type": {
Type: schema.TypeString,
Required: true,
Description: "type of storage",
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"des", "hitachi", "dorado", "tatlin", "shared", "local", "ustor"}, false),
Description: "type of storage",
},
"access_to_pool": {
Type: schema.TypeSet,

View File

@@ -76,6 +76,12 @@ func utilitySepListCheckPresence(ctx context.Context, d *schema.ResourceData, m
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if sepIds, ok := d.GetOk("sep_ids"); ok {
ids := sepIds.([]interface{})
for _, id := range ids {
req.SepIDs = append(req.SepIDs, uint64(id.(int)))
}
}
log.Debugf("utilitySepListCheckPresence: load image list")
sepList, err := c.CloudBroker().SEP().List(ctx, req)

View File

@@ -1,72 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceStackListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
stackList, err := utilityStackListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenStacksList(stackList))
d.Set("entry_count", stackList.EntryCount)
return nil
}
func DataSourceStacksList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceStackListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceStacksListSchemaMake(),
}
}

View File

@@ -1,69 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceStackRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
stack, err := utilityStackCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
flattenStack(d, stack)
d.SetId(strconv.Itoa(d.Get("stack_id").(int)))
return nil
}
func DataSourceStack() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceStackRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceStackSchemaMake(),
}
}

View File

@@ -1,204 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
log "github.com/sirupsen/logrus"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenStack(d *schema.ResourceData, st *stack.InfoStack) {
log.Debugf("flattenStack: decoded Stack name %q / ID %d",
st.Name, st.ID)
d.Set("ckey", st.Ckey)
d.Set("meta", flattens.FlattenMeta(st.Meta))
d.Set("api_url", st.APIURL)
d.Set("api_key", st.Apikey)
d.Set("app_id", st.AppID)
d.Set("cpu_allocation_ratio", st.CPUAllocationRatio)
d.Set("description", st.Description)
d.Set("descr", st.Descr)
d.Set("drivers", st.Drivers)
d.Set("eco", flattenEco(st.Eco))
d.Set("error", st.Error)
d.Set("gid", st.GID)
d.Set("guid", st.GUID)
d.Set("stack_id", st.ID)
d.Set("images", st.Images)
d.Set("login", st.Login)
d.Set("mem_allocation_ratio", st.MemAllocationRatio)
d.Set("name", st.Name)
d.Set("packages", flattenPackages(st.Packages))
d.Set("passwd", st.Password)
d.Set("reference_id", st.ReferenceID)
d.Set("status", st.Status)
d.Set("type", st.Type)
}
func flattenPackages(pg stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"libvirt_bin": flattenLibvirtBin(pg),
"libvirt_daemon": flattenLibvirtDaemon(pg),
"lvm2_lockd": flattenLvm2Lockd(pg),
"openvswitch_common": flattenOpenvswitchCommon(pg),
"openvswitch_switch": flattenOpenvswitchSwitch(pg),
"qemu_system_x86": flattenQemuSystemX86(pg),
"sanlock": flattenSanlock(pg),
}
res = append(res, temp)
return res
}
func flattenLibvirtBin(lb stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": lb.LibvirtBin.InstalledSize,
"ver": lb.LibvirtBin.Ver,
}
res = append(res, temp)
return res
}
func flattenLibvirtDaemon(ld stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": ld.LibvirtDaemon.InstalledSize,
"ver": ld.LibvirtDaemon.Ver,
}
res = append(res, temp)
return res
}
func flattenLvm2Lockd(ll stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": ll.Lvm2Lockd.InstalledSize,
"ver": ll.Lvm2Lockd.Ver,
}
res = append(res, temp)
return res
}
func flattenOpenvswitchCommon(oc stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": oc.OpenvswitchCommon.InstalledSize,
"ver": oc.OpenvswitchCommon.Ver,
}
res = append(res, temp)
return res
}
func flattenOpenvswitchSwitch(os stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": os.OpenvswitchSwitch.InstalledSize,
"ver": os.OpenvswitchSwitch.Ver,
}
res = append(res, temp)
return res
}
func flattenQemuSystemX86(qs stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": qs.QemuSystemX86.InstalledSize,
"ver": qs.QemuSystemX86.Ver,
}
res = append(res, temp)
return res
}
func flattenSanlock(sl stack.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": sl.Sanlock.InstalledSize,
"ver": sl.Sanlock.Ver,
}
res = append(res, temp)
return res
}
func flattenEco(m interface{}) string {
switch d := m.(type) {
case string:
return d
case int:
return strconv.Itoa(d)
case int64:
return strconv.FormatInt(d, 10)
case float64:
return strconv.FormatInt(int64(d), 10)
default:
return ""
}
}
func flattenStacksList(sl *stack.ListStacks) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(sl.Data))
for _, item := range sl.Data {
temp := map[string]interface{}{
"ckey": item.Ckey,
"meta": flattens.FlattenMeta(item.Meta),
"api_url": item.APIURL,
"api_key": item.Apikey,
"app_id": item.AppID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"description": item.Description,
"descr": item.Descr,
"drivers": item.Drivers,
"eco": flattenEco(item.Eco),
"error": item.Error,
"gid": item.GID,
"guid": item.GUID,
"stack_id": item.ID,
"images": item.Images,
"login": item.Login,
"mem_allocation_ratio": item.MemAllocationRatio,
"name": item.Name,
"packages": flattenPackages(item.Packages),
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
}
res = append(res, temp)
}
return res
}

View File

@@ -1,582 +0,0 @@
package stack
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
func dataSourceStackSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"stack_id": {
Type: schema.TypeInt,
Required: true,
Description: "stack_id",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
Description: "ckey",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"api_url": {
Type: schema.TypeString,
Computed: true,
Description: "api_url",
},
"api_key": {
Type: schema.TypeString,
Computed: true,
Description: "api_key",
},
"app_id": {
Type: schema.TypeString,
Computed: true,
Description: "api_id",
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
Description: "cpu_allocation_ratio",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "description",
},
"descr": {
Type: schema.TypeString,
Computed: true,
Description: "descr",
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "drivers",
},
"eco": {
Type: schema.TypeString,
Computed: true,
Description: "eco",
},
"error": {
Type: schema.TypeInt,
Computed: true,
Description: "error",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "gid",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "guid",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "images",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "login",
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
Description: "mem_allocation_ratio",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "name",
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"libvirt_bin": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_bin",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"libvirt_daemon": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_daemon",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"lvm2_lockd": {
Type: schema.TypeList,
Computed: true,
Description: "lvm2_lockd",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_common": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_common",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_switch": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_switch",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"qemu_system_x86": {
Type: schema.TypeList,
Computed: true,
Description: "qemu_system_x86",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"sanlock": {
Type: schema.TypeList,
Computed: true,
Description: "sanlock",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
},
},
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "password",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "reference_id",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "type",
},
}
}
func dataSourceStacksListSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by_id",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "type",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"stack_id": {
Type: schema.TypeInt,
Required: true,
Description: "stack_id",
},
"ckey": {
Type: schema.TypeString,
Computed: true,
Description: "ckey",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"api_url": {
Type: schema.TypeString,
Computed: true,
Description: "api_url",
},
"api_key": {
Type: schema.TypeString,
Computed: true,
Description: "api_key",
},
"app_id": {
Type: schema.TypeString,
Computed: true,
Description: "api_id",
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
Description: "cpu_allocation_ratio",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "description",
},
"descr": {
Type: schema.TypeString,
Computed: true,
Description: "descr",
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "drivers",
},
"eco": {
Type: schema.TypeString,
Computed: true,
Description: "eco",
},
"error": {
Type: schema.TypeInt,
Computed: true,
Description: "error",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "gid",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "guid",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "images",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "login",
},
"mem_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
Description: "mem_allocation_ratio",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "name",
},
"packages": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"libvirt_bin": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_bin",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"libvirt_daemon": {
Type: schema.TypeList,
Computed: true,
Description: "libvirt_daemon",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"lvm2_lockd": {
Type: schema.TypeList,
Computed: true,
Description: "lvm2_lockd",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_common": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_common",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"openvswitch_switch": {
Type: schema.TypeList,
Computed: true,
Description: "openvswitch_switch",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"qemu_system_x86": {
Type: schema.TypeList,
Computed: true,
Description: "qemu_system_x86",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
"sanlock": {
Type: schema.TypeList,
Computed: true,
Description: "sanlock",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"installed_size": {
Type: schema.TypeString,
Computed: true,
Description: "installed_size",
},
"ver": {
Type: schema.TypeString,
Computed: true,
Description: "ver",
},
},
},
},
},
},
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "password",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "reference_id",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "type",
},
},
},
Description: "items of stacks list",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry_count",
},
}
}

View File

@@ -1,64 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityStackCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.InfoStack, error) {
c := m.(*controller.ControllerCfg)
req := stack.GetRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.StackId = id
} else {
req.StackId = uint64(d.Get("stack_id").(int))
}
log.Debugf("utilityStackCheckPresence: load stack")
stackInfo, err := c.CloudBroker().Stack().Get(ctx, req)
if err != nil {
return nil, err
}
return stackInfo, nil
}

View File

@@ -1,78 +0,0 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package stack
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityStackListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stack.ListStacks, error) {
c := m.(*controller.ControllerCfg)
req := stack.ListRequest{}
if ByID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(ByID.(int))
}
if Name, ok := d.GetOk("name"); ok {
req.Name = Name.(string)
}
if Type, ok := d.GetOk("type"); ok {
req.Type = Type.(string)
}
if Status, ok := d.GetOk("status"); ok {
req.Status = Status.(string)
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if Page, ok := d.GetOk("page"); ok {
req.Page = uint64(Page.(int))
}
if Size, ok := d.GetOk("size"); ok {
req.Size = uint64(Size.(int))
}
log.Debugf("utilityStackListCheckPresence: load stack list")
stackList, err := c.CloudBroker().Stack().List(ctx, req)
if err != nil {
return nil, err
}
return stackList, nil
}

View File

@@ -20,9 +20,10 @@ func flattenAccessSEPPools(accessSEPPools stpolicy.ListAccessSEPPools) []map[str
res := make([]map[string]interface{}, 0, len(accessSEPPools))
for _, asp := range accessSEPPools {
temp := map[string]interface{}{
"sep_id": asp.SEPID,
"sep_name": asp.Name,
"pool_names": asp.PoolNames,
"sep_id": asp.SEPID,
"sep_name": asp.Name,
"pool_names": asp.PoolNames,
"sep_tech_status": asp.SepTechStatus,
}
res = append(res, temp)

View File

@@ -1,6 +1,9 @@
package stpolicy
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func resourceStoragePolicySchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
@@ -21,6 +24,10 @@ func resourceStoragePolicySchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Required: true,
@@ -114,6 +121,10 @@ func dataSourceStoragePolicySchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
@@ -194,6 +205,11 @@ func dataSourceStoragePolicyListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Optional: true,
},
"sep_tech_status": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"ENABLED", "DISABLED"}, true),
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
@@ -240,11 +256,15 @@ func dataSourceStoragePolicyListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_name": {
"sep_tech_status": {
Type: schema.TypeString,
Computed: true,
},

View File

@@ -55,6 +55,10 @@ func utilityStoragePolicyListCheckPresence(ctx context.Context, d *schema.Resour
req.SepID = uint64(SEPID.(int))
}
if SEPtechstatus, ok := d.GetOk("sep_tech_status"); ok {
req.SepTechStatus = SEPtechstatus.(string)
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}

View File

@@ -40,6 +40,7 @@ func flattenTrunk(d *schema.ResourceData, trunkItem *trunk.ItemTrunk) {
d.Set("account_ids", trunkItem.AccountIDs)
d.Set("ovs_bridge", trunkItem.OVSBridge)
d.Set("native_vlan_id", trunkItem.NativeVLANID)
d.Set("mtu", trunkItem.MTU)
d.Set("status", trunkItem.Status)
d.Set("trunk_tags", trunkItem.TrunkTags)
d.Set("created_at", trunkItem.CreatedAt)
@@ -66,6 +67,7 @@ func flattenTrunkList(trunkList *trunk.ListTrunks) []map[string]interface{} {
"name": trunkItem.Name,
"native_vlan_id": trunkItem.NativeVLANID,
"ovs_bridge": trunkItem.OVSBridge,
"mtu": trunkItem.MTU,
"status": trunkItem.Status,
"trunk_tags": trunkItem.TrunkTags,
"updated_at": trunkItem.UpdatedAt,

View File

@@ -37,6 +37,10 @@ func resourceTrunkCreate(ctx context.Context, d *schema.ResourceData, m interfac
createReq.Description = description.(string)
}
if mtu, ok := d.GetOk("mtu"); ok {
createReq.MTU = uint64(mtu.(int))
}
accountIDs := make([]uint64, 0)
if accountAccess, ok := d.GetOk("account_ids"); ok {
@@ -108,11 +112,19 @@ func resourceTrunkUpdate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err)
}
if d.HasChanges("name", "trunk_tags", "description", "native_vlan_id") {
if d.HasChanges("name", "trunk_tags", "description", "native_vlan_id", "mtu") {
req := trunk.UpdateRequest{
TrunkID: uint64(id),
Name: d.Get("name").(string),
TrunkTags: d.Get("trunk_tags").(string),
TrunkID: uint64(id),
}
if d.HasChange("name") {
name := d.Get("name").(string)
req.Name = name
}
if d.HasChange("trunk_tags") {
trunkTags := d.Get("trunk_tags").(string)
req.TrunkTags = trunkTags
}
if d.HasChange("description") {
@@ -125,6 +137,11 @@ func resourceTrunkUpdate(ctx context.Context, d *schema.ResourceData, m interfac
req.NativeVLANID = nativeVLANID
}
if d.HasChange("mtu") {
mtu := d.Get("mtu").(int)
req.MTU = uint64(mtu)
}
if _, err := c.CloudBroker().Trunk().Update(ctx, req); err != nil {
return diag.FromErr(err)
}

View File

@@ -62,6 +62,12 @@ func resourceTrunkSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "MAC address",
},
"mtu": {
Type: schema.TypeInt,
Optional: true,
Default: 1500,
Description: "Maximum Transmission Unit",
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -147,6 +153,11 @@ func dataSourceTrunkSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Native VLAN ID",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum Transmission Unit",
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -303,6 +314,11 @@ func dataSourceTrunkListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "OVS bridge name",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum Transmission Unit",
},
"status": {
Type: schema.TypeString,
Computed: true,

View File

@@ -85,7 +85,7 @@ func flattenUserResource(d *schema.ResourceData, details *user.ItemUser) {
d.Set("data", details.Data)
d.Set("description", details.Description)
d.Set("domain", details.Domain)
d.Set("emailaddress", details.Emails)
d.Set("emailaddress", details.Emails[0])
d.Set("gid", details.GID)
d.Set("groups", details.Groups)
d.Set("guid", details.GUID)

View File

@@ -0,0 +1,156 @@
package user
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// resourceUserV1 returns the version-1 schema of the user resource, as it
// existed before "emailaddress" was changed from a list of strings to a
// single string (schema version 2). It exists only so the StateUpgrader can
// decode state written by older provider versions — do not modify it.
func resourceUserV1() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"username": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "ID of user",
			},
			// In v1 this was a list; resourceUserUpgradeV1 collapses it to a
			// single string when migrating state to schema version 2.
			"emailaddress": {
				Type:     schema.TypeList,
				Required: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "Email address of the user",
			},
			"password": {
				Type:        schema.TypeString,
				Default:     "strongpassword",
				Optional:    true,
				Description: "password of user",
			},
			"provider_name": {
				Type:         schema.TypeString,
				Optional:     true,
				Description:  "provider",
				ValidateFunc: validation.StringInSlice([]string{"bvs", "decs3o"}, false),
			},
			"groups": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "list of groups this user belongs to",
			},
			"ckey": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "ckey",
			},
			"meta": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "meta",
			},
			"active": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "active",
			},
			"apiaccess": {
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeInt,
				},
				Description: "list of apiaccess groups this user belongs to",
			},
			"blocked": {
				Type:        schema.TypeBool,
				Optional:    true,
				Computed:    true,
				Description: "is the user blocked",
			},
			"authkey": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "authkey",
			},
			"auth_keys": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "authkeys",
			},
			"data": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "data",
			},
			"description": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "description",
			},
			"domain": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "domain",
			},
			"gid": {
				Type:        schema.TypeInt,
				Computed:    true,
				Description: "gid",
			},
			"guid": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "guid",
			},
			"last_check": {
				Type:        schema.TypeInt,
				Computed:    true,
				Description: "last_check",
			},
			"mobile": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "mobile",
			},
			"protected": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "protected",
			},
			"roles": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "roles",
			},
			"service_account": {
				Type:        schema.TypeBool,
				Computed:    true,
				Description: "service_account",
			},
			"xmpp": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
				Description: "xmpp",
			},
		},
	}
}

View File

@@ -19,18 +19,11 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, m interface
username := d.Get("username").(string)
emails := d.Get("emailaddress").([]interface{})
emailAddress := make([]string, 0, len(emails))
for _, v := range emails {
emailAddress = append(emailAddress, v.(string))
}
email := d.Get("emailaddress").(string)
createReq := user.CreateRequest{
Username: username,
EmailAddress: emailAddress,
EmailAddress: email,
}
if passwd, ok := d.GetOk("password"); ok {
@@ -144,7 +137,7 @@ func resourceUserDelete(ctx context.Context, d *schema.ResourceData, m interface
func ResourceUser() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
SchemaVersion: 2,
CreateContext: resourceUserCreate,
ReadContext: resourceUserRead,
@@ -163,6 +156,14 @@ func ResourceUser() *schema.Resource {
Default: &constants.Timeout600s,
},
StateUpgraders: []schema.StateUpgrader{
{
Version: 1,
Type: resourceUserV1().CoreConfigSchema().ImpliedType(),
Upgrade: resourceUserUpgradeV1,
},
},
Schema: resourceUserSchemaMake(),
}
}

View File

@@ -167,6 +167,11 @@ func dataSourceUserGetAuditSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "find all audits after point in time (unixtime)",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format ±",
},
"timestamp_to": {
Type: schema.TypeInt,
Optional: true,
@@ -416,12 +421,9 @@ func resourceUserSchemaMake() map[string]*schema.Schema {
Description: "ID of user",
},
"emailaddress": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "email addresses of the user",
Type: schema.TypeString,
Required: true,
Description: "email address of the user",
},
"password": {
Type: schema.TypeString,

View File

@@ -0,0 +1,20 @@
package user
import (
"context"
log "github.com/sirupsen/logrus"
)
// resourceUserUpgradeV1 migrates user resource state from schema version 1 to
// version 2: the "emailaddress" attribute changed from a list of strings to a
// single string, so the first element of the old list becomes the new value.
//
// The upgrader is defensive: an empty or malformed list is normalized to ""
// (leaving the list in place would fail decoding against the v2 string
// schema), and a value that is already a string is left untouched so the
// upgrade is idempotent. It never returns an error.
func resourceUserUpgradeV1(ctx context.Context, rawState map[string]interface{}, meta any) (map[string]interface{}, error) {
	log.Debug("resourceUserUpgradeV1: upgrading state from list to string")

	switch oldVal := rawState["emailaddress"].(type) {
	case []interface{}:
		if len(oldVal) > 0 {
			if firstEmail, ok := oldVal[0].(string); ok {
				// Keep only the first address; the v2 schema stores a single string.
				rawState["emailaddress"] = firstEmail
				log.Debugf("resourceUserUpgradeV1: converted emailaddress from list to string: %s", firstEmail)
				return rawState, nil
			}
		}
		// Empty list or non-string first element: normalize so the upgraded
		// state still decodes against the v2 string schema.
		rawState["emailaddress"] = ""
	case string:
		// Already migrated — nothing to do.
	}

	return rawState, nil
}

View File

@@ -60,6 +60,9 @@ func utilityUserGetAuditCheckPresence(ctx context.Context, d *schema.ResourceDat
if timestampTo, ok := d.GetOk("timestamp_to"); ok {
req.TimestampTo = uint64(timestampTo.(int))
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if Page, ok := d.GetOk("page"); ok {
req.Page = uint64(Page.(int))

View File

@@ -156,7 +156,7 @@ func flattenVinsVNFDev(vd vins.VNFDev) []map[string]interface{} {
"type": vd.Type,
"vnc_password": vd.VNCPassword,
"vins": vd.VINS,
"zone_id": vd.ZoneID,
"zone_id": vd.ZoneID,
}
res = append(res, temp)
return res
@@ -416,10 +416,10 @@ func flattenVinsMGMT(m vins.MGMT) []map[string]interface{} {
func flattenVinsResources(r vins.Resources) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": r.CPU,
"ram": r.RAM,
"stack_id": r.StackID,
"uuid": r.UUID,
"cpu": r.CPU,
"ram": r.RAM,
"node_id": r.NodeID,
"uuid": r.UUID,
}
res = append(res, temp)
return res

View File

@@ -94,10 +94,10 @@ func dataSourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "ram",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
Description: "stack id",
Description: "node id",
},
"uuid": {
Type: schema.TypeString,
@@ -2322,10 +2322,10 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "ram",
},
"stack_id": {
"node_id": {
Type: schema.TypeInt,
Computed: true,
Description: "stack id",
Description: "node id",
},
"uuid": {
Type: schema.TypeString,

View File

@@ -58,6 +58,10 @@ func dataSourceZoneSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -115,6 +115,10 @@ func dataSourceZoneListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"auto_start": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -59,6 +59,7 @@ func flattenZone(d *schema.ResourceData, item *zone.RecordZone) error {
d.Set("lb_ids", item.LBIDs)
d.Set("bservice_ids", item.BserviceIDs)
d.Set("k8s_ids", item.K8SIDs)
d.Set("auto_start", item.AutoStart)
log.Debugf("flattenZone: decoded RecordZone name %q / ID %d, complete",
item.Name, item.ID)
@@ -80,6 +81,7 @@ func flattenZoneList(zone *zone.ListZones) []map[string]interface{} {
"created_time": zone.CreatedTime,
"updated_time": zone.UpdatedTime,
"node_ids": zone.NodeIDs,
"auto_start": zone.AutoStart,
}
res = append(res, temp)
}

View File

@@ -56,6 +56,10 @@ func resourceZoneCreate(ctx context.Context, d *schema.ResourceData, m interface
Name: zoneName,
}
if aS, ok := d.GetOk("auto_start"); ok {
req.AutoStart = aS.(bool)
}
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
}
@@ -113,7 +117,7 @@ func resourceZoneUpdate(ctx context.Context, d *schema.ResourceData, m interface
log.Debugf("resourceZoneUpdate: called Zone with id %d", zoneID)
if d.HasChanges("name", "description", "node_ids") {
if d.HasChanges("name", "description", "node_ids", "auto_start") {
if err := utilityZoneUpdate(ctx, d, m, zoneID); err != nil {
return diag.FromErr(err)
}
@@ -193,6 +197,11 @@ func resourceZoneSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"auto_start": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"zone_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -65,7 +65,7 @@ func utilityZoneCheckPresence(ctx context.Context, d *schema.ResourceData, m int
func utilityZoneUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, zoneID uint64) error {
c := m.(*controller.ControllerCfg)
if d.HasChanges("name", "description") {
if d.HasChanges("name", "description", "auto_start") {
req := zone.UpdateRequest{
ID: zoneID,
}
@@ -77,6 +77,10 @@ func utilityZoneUpdate(ctx context.Context, d *schema.ResourceData, m interface{
req.Description = d.Get("description").(string)
}
if d.HasChange("auto_start") {
req.AutoStart = d.Get("auto_start").(bool)
}
_, err := c.CloudBroker().Zone().Update(ctx, req)
if err != nil {
return err