This commit is contained in:
2025-12-22 16:34:04 +03:00
parent e42fbcef39
commit 67830db3eb
28 changed files with 432 additions and 368 deletions

View File

@@ -45,6 +45,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)
d.Set("cpu", computeRec.CPUs)
d.Set("created_by", computeRec.CreatedBy)
d.Set("created_time", computeRec.CreatedTime)
d.Set("custom_fields", string(customFields))
@@ -64,6 +65,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("image_name", computeRec.ImageName)
d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
d.Set("lock_status", computeRec.LockStatus)
d.Set("loader_meta_iso", flattenLoaderMetaIso(computeRec.LoaderMetaIso))
d.Set("manager_id", computeRec.ManagerID)
d.Set("manager_type", computeRec.ManagerType)
d.Set("migrationjob", computeRec.MigrationJob)
@@ -78,14 +80,17 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.PinnedToStack)
d.Set("preferred_cpu", computeRec.PreferredCPU)
d.Set("ram", computeRec.RAM)
d.Set("reference_id", computeRec.ReferenceID)
d.Set("registered", computeRec.Registered)
d.Set("res_name", computeRec.ResName)
d.Set("reserved_node_cpus", computeRec.ReservedNodeCpus)
d.Set("rg_name", computeRec.RGName)
d.Set("rg_id", computeRec.RGID)
d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
d.Set("stack_id", computeRec.StackID)
d.Set("stack_name", computeRec.StackName)
d.Set("started", computeRec.TechStatus == "STARTED")
d.Set("stateless_sep_id", computeRec.StatelessSEPID)
d.Set("stateless_sep_type", computeRec.StatelessSEPType)
d.Set("status", computeRec.Status)
@@ -227,10 +232,6 @@ func flattenQOS(qos compute.QOS) []map[string]interface{} {
func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(disksList))
if len(disksBlocks) == 0 {
return res
}
sort.Slice(disksList, func(i, j int) bool {
return disksList[i].ID < disksList[j].ID
})
@@ -242,8 +243,25 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
continue
}
pernamentlyValue := disksBlocks[indexDataDisks].(map[string]interface{})["permanently"].(bool)
nodeIds := disksBlocks[indexDataDisks].(map[string]interface{})["node_ids"].(*schema.Set)
var pernamentlyValue bool
if indexDataDisks < len(disksBlocks) {
if diskBlock, ok := disksBlocks[indexDataDisks].(map[string]interface{}); ok {
if perm, exists := diskBlock["permanently"]; exists {
if permBool, ok := perm.(bool); ok {
pernamentlyValue = permBool
}
}
}
}
var nodeIds *schema.Set
if indexDataDisks < len(disksBlocks) {
if diskBlock, ok := disksBlocks[indexDataDisks].(map[string]interface{}); ok {
if nodeIds, exists := diskBlock["node_ids"]; exists {
nodeIds = nodeIds.(*schema.Set)
}
}
}
temp := map[string]interface{}{
"disk_name": disk.Name,
@@ -258,10 +276,16 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
"shareable": disk.Shareable,
"size_used": disk.SizeUsed,
"size_max": disk.SizeMax,
"permanently": pernamentlyValue,
"present_to": disk.PresentTo,
"storage_policy_id": disk.StoragePolicyID,
"to_clean": disk.ToClean,
"permanently": pernamentlyValue,
"devicename": disk.DeviceName,
"create_by": disk.CreatedBy,
"create_time": disk.CreatedTime,
"delete_by": disk.DeletedBy,
"delete_time": disk.DeletedTime,
"update_time": disk.UpdatedTime,
}
res = append(res, temp)
indexDataDisks++
@@ -556,6 +580,16 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
return res
}
// flattenLoaderMetaIso converts a compute.LoaderMetaIso record into the
// single-element list-of-maps form that the Terraform schema expects for
// the "loader_meta_iso" computed attribute.
func flattenLoaderMetaIso(loaderMetaIso compute.LoaderMetaIso) []map[string]interface{} {
	return []map[string]interface{}{
		{
			"device_name": loaderMetaIso.DeviceName,
			"path":        loaderMetaIso.Path,
		},
	}
}
func flattenComputeAudits(computeAudits compute.ListDetailedAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeAudits.Data))
for _, computeAudit := range computeAudits.Data {
@@ -808,6 +842,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("hot_resize", compFacts.HotResize)
d.Set("network_interface_naming", compFacts.NetworkInterfaceNaming)
d.Set("zone_id", compFacts.ZoneID)
d.Set("loader_meta_iso", flattenLoaderMetaIso(compFacts.LoaderMetaIso))
d.Set("os_version", compFacts.OSVersion)
//extra fields setting
bootDisk := findBootDisk(compFacts.Disks)
@@ -868,7 +903,10 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"created_by": disk.CreatedBy,
"deleted_time": disk.DeletedTime,
"deleted_by": disk.DeletedBy,
"devicename": disk.DeviceName,
"desc": disk.Description,
"destruction_time": disk.DestructionTime,
"disk_path": disk.DiskPath,
@@ -907,6 +945,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"tech_status": disk.TechStatus,
"type": disk.Type,
"to_clean": disk.ToClean,
"updated_time": disk.UpdatedTime,
}
res = append(res, temp)
}

View File

@@ -344,26 +344,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("stack_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
req.Force = force
}
if autoStart, ok := d.Get("auto_start_w_node").(bool); ok {
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
if err != nil {
warnings.Add(err)
}
}
if libvirtSettings, ok := d.GetOk("libvirt_settings"); ok {
if libvirtSettings.(*schema.Set).Len() > 0 {
lvs := libvirtSettings.(*schema.Set).List()
@@ -456,6 +436,36 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) {
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
if !start.(bool) && !stackOk {
warnings.Add(errors.New("cannot pin to stack a VM, that is not started and stack_id is not set"))
} else {
req := compute.PinToStackRequest{
ComputeID: computeId,
}
if stackID, ok := d.Get("stack_id").(int); ok {
req.TargetStackID = uint64(stackID)
}
if force, ok := d.Get("force_pin").(bool); ok {
req.Force = force
}
if autoStart, ok := d.Get("auto_start_w_node").(bool); ok {
req.AutoStart = autoStart
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
if affinityLabel, ok := d.GetOk("affinity_label"); ok {
req := compute.AffinityLabelSetRequest{
ComputeIDs: []uint64{
@@ -950,33 +960,6 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StopRequest{ComputeID: computeId}
log.Debugf("resourceComputeDelete: stoping Compute ID %d", computeId)
if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
diag.FromErr(err)
}
}
}
pciList, ok := d.GetOk("pci_devices")
if d.Get("permanently").(bool) && ok {
pciDevices := pciList.(*schema.Set).List()
for _, v := range pciDevices {
pciID := v.(int)
req := compute.DetachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(pciID),
}
_, err := c.CloudBroker().Compute().DetachPciDevice(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
req := compute.DeleteRequest{
ComputeID: computeId,
Permanently: d.Get("permanently").(bool),

View File

@@ -530,6 +530,22 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
@@ -971,6 +987,22 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"loader_meta_iso": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": {
Type: schema.TypeString,
Computed: true,
},
"path": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"user_data": {
Type: schema.TypeString,
Computed: true,
@@ -3760,6 +3792,30 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeBool,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"create_by": {
Type: schema.TypeString,
Computed: true,
},
"create_time": {
Type: schema.TypeInt,
Computed: true,
},
"delete_by": {
Type: schema.TypeString,
Computed: true,
},
"delete_time": {
Type: schema.TypeInt,
Computed: true,
},
"update_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
@@ -4561,5 +4617,21 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"loader_meta_iso": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": {
Type: schema.TypeString,
Computed: true,
},
"path": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
}
}

View File

@@ -1618,6 +1618,13 @@ func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m int
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
start, _ := d.GetOk("started")
_, stackOk := d.GetOk("stack_id")
if !start.(bool) && !stackOk {
errors.New("cannot pin to stack a VM, that is not started and stack_id is not set")
}
oldPin, newPin := d.GetChange("pin_to_stack")
if oldPin.(bool) && !newPin.(bool) {
req := compute.UnpinFromStackRequest{