This commit is contained in:
asteam
2025-03-28 12:05:19 +03:00
parent 5496073a0c
commit efe0c88556
299 changed files with 33035 additions and 100 deletions

View File

@@ -288,6 +288,25 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", computeId, d.Get("name").(string))
if ars, ok := d.GetOk("pci_devices"); ok {
log.Debugf("resourceComputeCreate: add pci devices on ComputeID: %d", computeId)
addedPciDevices := ars.(*schema.Set).List()
if len(addedPciDevices) > 0 {
for _, v := range addedPciDevices {
devicesConv := v.(int)
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(devicesConv),
}
_, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
argVal, ok = d.GetOk("extra_disks")
if ok && argVal.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
@@ -531,24 +550,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if ars, ok := d.GetOk("pci_devices"); ok {
log.Debugf("resourceComputeCreate: add pci devices on ComputeID: %d", computeId)
addedPciDevices := ars.(*schema.Set).List()
if len(addedPciDevices) > 0 {
for _, v := range addedPciDevices {
devicesConv := v.(int)
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(devicesConv),
}
_, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -1582,6 +1583,9 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
ComputeID: computeRec.ID,
Force: false,
}
if forceStop, ok := d.GetOk("force_stop"); ok {
stopReq.Force = forceStop.(bool)
}
_, err := c.CloudAPI().Compute().Stop(ctx, stopReq)
if err != nil {
@@ -1592,15 +1596,13 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
req := compute.RedeployRequest{
ComputeID: computeRec.ID,
ImageID: uint64(newImage.(int)),
DataDisks: "KEEP",
}
if diskSize, ok := d.GetOk("boot_disk_size"); ok {
req.DiskSize = uint64(diskSize.(int))
}
if dataDisks, ok := d.GetOk("data_disks"); ok {
req.DataDisks = dataDisks.(string)
}
if autoStart, ok := d.GetOk("auto_start"); ok {
if autoStart, ok := d.GetOk("started"); ok {
req.AutoStart = autoStart.(bool)
}
if forceStop, ok := d.GetOk("force_stop"); ok {
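Net effect of this hunk: DataDisks is now hardcoded to "KEEP" (the configurable data_disks attribute is dropped from the schema below) and AutoStart is keyed off started instead of the removed auto_start. A sketch of how the redeploy request ends up being assembled; lookup stands in for schema.ResourceData.GetOk, and the ForceStop field is an assumption based on the truncated tail of the hunk:

```go
package main

import "fmt"

// RedeployRequest mirrors the fields visible in the hunk; ForceStop is
// assumed from the truncated trailing lines.
type RedeployRequest struct {
	ComputeID uint64
	ImageID   uint64
	DiskSize  uint64
	DataDisks string // always "KEEP" after this commit
	AutoStart bool
	ForceStop bool
}

// lookup stands in for schema.ResourceData.GetOk: it reports ok=false when
// the key is absent, mimicking the "only set if configured" branches above.
func lookup(cfg map[string]interface{}, key string) (interface{}, bool) {
	v, ok := cfg[key]
	return v, ok
}

func buildRedeploy(cfg map[string]interface{}, computeID uint64) RedeployRequest {
	req := RedeployRequest{
		ComputeID: computeID,
		ImageID:   uint64(cfg["image_id"].(int)),
		DataDisks: "KEEP", // no longer user-configurable
	}
	if v, ok := lookup(cfg, "boot_disk_size"); ok {
		req.DiskSize = uint64(v.(int))
	}
	if v, ok := lookup(cfg, "started"); ok { // replaces the removed auto_start
		req.AutoStart = v.(bool)
	}
	if v, ok := lookup(cfg, "force_stop"); ok {
		req.ForceStop = v.(bool)
	}
	return req
}

func main() {
	cfg := map[string]interface{}{"image_id": 7, "boot_disk_size": 20, "started": true}
	fmt.Printf("%+v\n", buildRedeploy(cfg, 42))
}
```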
@@ -1716,6 +1718,16 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StopRequest{ComputeID: computeId}
log.Debugf("resourceComputeDelete: stoping Compute ID %d", computeId)
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
diag.FromErr(err)
}
}
}
pciList, ok := d.GetOk("pci_devices")
if d.Get("permanently").(bool) && ok {
@@ -2180,12 +2192,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Flag for start compute after node exits from MAINTENANCE state",
},
"auto_start": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Flag for redeploy compute",
},
"force_stop": {
Type: schema.TypeBool,
Optional: true,
@@ -2198,13 +2204,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Flag for resize compute",
},
"data_disks": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"KEEP", "DETACH", "DESTROY"}, false),
Default: "DETACH",
Description: "Flag for redeploy compute",
},
"started": {
Type: schema.TypeBool,
Optional: true,
@@ -2515,7 +2514,7 @@ func ResourceCompute() *schema.Resource {
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "network", "affinity_rules", "anti_affinity_rules",
"disks", "extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu") {
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}

View File

@@ -242,6 +242,25 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", computeId, d.Get("name").(string))
if ars, ok := d.GetOk("pci_devices"); ok {
log.Debugf("resourceComputeCreate: add pci devices on ComputeID: %d", computeId)
addedPciDevices := ars.(*schema.Set).List()
if len(addedPciDevices) > 0 {
for _, v := range addedPciDevices {
devicesConv := v.(int)
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(devicesConv),
}
_, err := c.CloudBroker().Compute().AttachPCIDevice(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
extraDisks, ok := d.GetOk("extra_disks")
if ok && extraDisks.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", extraDisks.(*schema.Set).Len())
@@ -273,7 +292,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("target_stack_id").(int)),
TargetStackID: uint64(d.Get("stack_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
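With target_stack_id gone from the schema (removed in a hunk further down), pinning now targets the stack the compute already reports via stack_id. A small sketch of the request assembly; the Force field name is assumed from the truncated force_pin branch:

```go
package main

import "fmt"

// PinToStackRequest mirrors the request in the diff; Force is assumed.
type PinToStackRequest struct {
	ComputeID     uint64
	TargetStackID uint64
	Force         bool
}

// buildPinRequest targets the stack the compute already reports via
// stack_id rather than a separate target_stack_id attribute.
func buildPinRequest(computeID uint64, stackID int, forcePin bool) PinToStackRequest {
	return PinToStackRequest{
		ComputeID:     computeID,
		TargetStackID: uint64(stackID),
		Force:         forcePin,
	}
}

func main() {
	fmt.Printf("%+v\n", buildPinRequest(42, 3, true))
}
```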
@@ -534,24 +553,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if ars, ok := d.GetOk("pci_devices"); ok {
log.Debugf("resourceComputeCreate: add pci devices on ComputeID: %d", computeId)
addedPciDevices := ars.(*schema.Set).List()
if len(addedPciDevices) > 0 {
for _, v := range addedPciDevices {
devicesConv := v.(int)
req := compute.AttachPCIDeviceRequest{
ComputeID: computeId,
DeviceID: uint64(devicesConv),
}
_, err := c.CloudBroker().Compute().AttachPCIDevice(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -829,6 +830,16 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StopRequest{ComputeID: computeId}
log.Debugf("resourceComputeDelete: stoping Compute ID %d", computeId)
if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
diag.FromErr(err)
}
}
}
pciList, ok := d.GetOk("pci_devices")
if d.Get("permanently").(bool) && ok {
@@ -867,13 +878,18 @@ func ResourceCompute() *schema.Resource {
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "libvirt_settings", "network", "affinity_rules", "anti_affinity_rules",
"disks", "extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu") {
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}
if diff.HasChanges("pin_to_stack") {
diff.SetNewComputed("pinned")
}
if diff.HasChanges("started") {
diff.SetNewComputed("tech_status")
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}
return nil
},
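The broker-side CustomizeDiff gains two invalidations: a pin_to_stack change marks pinned as known-after-apply, and a started change does the same for tech_status plus the audit fields. Worth noting that in terraform-plugin-sdk v2, HasChanges() with no arguments always returns false, so the leading diff.HasChanges() || is a no-op and the keyed call decides. A condensed sketch of the pattern against the real SDK types, with an abbreviated attribute list:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// customizeComputeDiff condenses the pattern from the hunk: any mutating
// attribute invalidates the audit fields, and the two stateful toggles
// additionally invalidate the server-side status they affect.
func customizeComputeDiff(ctx context.Context, diff *schema.ResourceDiff, _ interface{}) error {
	// HasChanges() with no arguments is always false in SDK v2, so only
	// the keyed call matters.
	if diff.HasChanges("chipset", "pin_to_stack", "started", "network", "extra_disks") {
		diff.SetNewComputed("updated_time")
		diff.SetNewComputed("updated_by")
	}
	if diff.HasChanges("pin_to_stack") {
		diff.SetNewComputed("pinned")
	}
	if diff.HasChanges("started") {
		diff.SetNewComputed("tech_status")
	}
	return nil
}

func main() {
	r := &schema.Resource{CustomizeDiff: customizeComputeDiff}
	fmt.Println("CustomizeDiff wired:", r.CustomizeDiff != nil)
}
```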

View File

@@ -3422,10 +3422,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"target_stack_id": {
Type: schema.TypeInt,
Optional: true,
},
"force_pin": {
Type: schema.TypeBool,
Optional: true,
@@ -3452,12 +3448,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Default: false,
},
"auto_start": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Flag for redeploy compute",
},
"force_stop": {
Type: schema.TypeBool,
Optional: true,
@@ -3470,13 +3460,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Flag for resize compute",
},
"data_disks": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"KEEP", "DETACH", "DESTROY"}, false),
Default: "DETACH",
Description: "Flag for redeploy compute",
},
"detach_disks": {
Type: schema.TypeBool,
Optional: true,

View File

@@ -1456,7 +1456,7 @@ func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m int
if !oldPin.(bool) && newPin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("target_stack_id").(int)),
TargetStackID: uint64(d.Get("stack_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
@@ -1534,6 +1534,9 @@ func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m in
if depresent, ok := d.Get("depresent").(bool); ok {
stopReq.Depresent = depresent
}
if forceStop, ok := d.GetOk("force_stop"); ok {
stopReq.Force = forceStop.(bool)
}
_, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
if err != nil {
@@ -1544,15 +1547,13 @@ func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m in
req := compute.RedeployRequest{
ComputeID: computeId,
ImageID: uint64(newImage.(int)),
DataDisks: "KEEP",
}
if diskSize, ok := d.GetOk("boot_disk_size"); ok {
req.DiskSize = uint64(diskSize.(int))
}
if dataDisks, ok := d.GetOk("data_disks"); ok {
req.DataDisks = dataDisks.(string)
}
if autoStart, ok := d.GetOk("auto_start"); ok {
if autoStart, ok := d.GetOk("started"); ok {
req.AutoStart = autoStart.(bool)
}
if forceStop, ok := d.GetOk("force_stop"); ok {
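One caveat with the auto_start → started swap above: GetOk treats a zero value as unset, so an explicit started = false never reaches req.AutoStart through this branch; the compute simply will not be auto-started. A small test sketch against the real SDK demonstrating that behavior (the one-attribute schema is hypothetical):

```go
package main

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// TestGetOkFalseBool shows why keying AutoStart off GetOk("started") can
// never propagate an explicit started = false: GetOk reports ok=false for
// zero values, so the branch is skipped entirely.
func TestGetOkFalseBool(t *testing.T) {
	s := map[string]*schema.Schema{
		"started": {Type: schema.TypeBool, Optional: true},
	}
	d := schema.TestResourceDataRaw(t, s, map[string]interface{}{"started": false})
	if _, ok := d.GetOk("started"); ok {
		t.Fatal("expected ok=false for a bool explicitly set to false")
	}
}
```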