Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 987fef81d7 | |||
|
|
f44c69003b | ||
| 22897c3bf5 | |||
|
|
bae25296bb |
19
CHANGELOG.md
19
CHANGELOG.md
@@ -1,23 +1,8 @@
|
|||||||
## Version 4.9.2
|
## Version 4.9.6
|
||||||
|
|
||||||
### Добавлено
|
|
||||||
|
|
||||||
### Исправлено
|
### Исправлено
|
||||||
|
|
||||||
#### image
|
|
||||||
| Идентификатор<br>задачи | Описание |
|
|
||||||
| --- | --- |
|
|
||||||
| BATF-999 | Изменен тип поля `url` с optional на required в resource `decort_cb_image` в cloudbroker/image |
|
|
||||||
|
|
||||||
#### kvmvm
|
#### kvmvm
|
||||||
| Идентификатор<br>задачи | Описание |
|
| Идентификатор<br>задачи | Описание |
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
| BATF-1002 | Ошибка при смене `ip_address` в блоке `network` приводящая к смене MAC-адреса сети в resource `decort_compute` и `decort_cb_compute` в cloudapi/kvmvm и cloudbroker/kvmvm |
|
| BATF-1147 | Остановка виртуальной машины перед удалением в resources `decort_kvmvm` и `decort_cb_kvmvm` в cloudapi/kvmvm и в cloudbroker/kvmvm |
|
||||||
|
|
||||||
### Удалено
|
|
||||||
|
|
||||||
#### image
|
|
||||||
| Идентификатор<br>задачи | Описание |
|
|
||||||
| --- | --- |
|
|
||||||
| BATF-999 | Опциональное поле `file_path` в resource `decort_cb_image` в cloudbroker/image |
|
|
||||||
|
|
||||||
2
Makefile
2
Makefile
@@ -7,7 +7,7 @@ ZIPDIR = ./zip
|
|||||||
BINARY=${NAME}
|
BINARY=${NAME}
|
||||||
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
|
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
|
||||||
MAINPATH = ./cmd/decort/
|
MAINPATH = ./cmd/decort/
|
||||||
VERSION=4.9.2
|
VERSION=4.9.6
|
||||||
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
|
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
|
||||||
|
|
||||||
FILES = ${BINARY}_${VERSION}_darwin_amd64\
|
FILES = ${BINARY}_${VERSION}_darwin_amd64\
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
Terraform provider для платформы Digital Energy Cloud Orchestration Technology (DECORT)
|
Terraform provider для платформы Digital Energy Cloud Orchestration Technology (DECORT)
|
||||||
|
|
||||||
## Соответсвие версий платформы версиям провайдера
|
## Соответствие версий платформы версиям провайдера
|
||||||
|
|
||||||
| Версия DECORT API | Версия провайдера Terraform |
|
| Версия DECORT API | Версия провайдера Terraform |
|
||||||
| ------ | ------ |
|
| ------ | ------ |
|
||||||
|
|||||||
@@ -297,7 +297,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
|
|
||||||
loaderType, loaderTypeOk := d.GetOk("loader_type")
|
loaderType, loaderTypeOk := d.GetOk("loader_type")
|
||||||
bootType, bootTypeOk := d.GetOk("boot_type")
|
bootType, bootTypeOk := d.GetOk("boot_type")
|
||||||
hotResize, hotResizeOk := d.GetOk("hot_resize")
|
hotResize, hotResizeOk := d.GetOkExists("hot_resize")
|
||||||
networkInterfaceNaming, networkInterfaceNamingOk := d.GetOk("network_interface_naming")
|
networkInterfaceNaming, networkInterfaceNamingOk := d.GetOk("network_interface_naming")
|
||||||
|
|
||||||
if loaderTypeOk {
|
if loaderTypeOk {
|
||||||
@@ -842,62 +842,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
doUpdate := false
|
|
||||||
resizeReq := compute.ResizeRequest{
|
|
||||||
ComputeID: computeRec.ID,
|
|
||||||
}
|
|
||||||
forceResize, ok := d.GetOk("force_resize")
|
|
||||||
if ok {
|
|
||||||
resizeReq.Force = forceResize.(bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
warnings := dc.Warnings{}
|
|
||||||
|
|
||||||
oldCpu, newCpu := d.GetChange("cpu")
|
|
||||||
if oldCpu.(int) > newCpu.(int) && !forceResize.(bool) {
|
|
||||||
return diag.Errorf("Cannot resize compute ID %d: enable 'force_resize' to reduce compute vCPUs", computeRec.ID)
|
|
||||||
}
|
|
||||||
if oldCpu.(int) != newCpu.(int) {
|
|
||||||
resizeReq.CPU = uint64(newCpu.(int))
|
|
||||||
doUpdate = true
|
|
||||||
} else {
|
|
||||||
resizeReq.CPU = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if resizeReq.CPU != 0 {
|
|
||||||
if preferredCPU, ok := d.GetOk("preferred_cpu"); ok {
|
|
||||||
preferredList := preferredCPU.([]interface{})
|
|
||||||
if len(preferredList) > 0 {
|
|
||||||
for _, v := range preferredList {
|
|
||||||
cpuNum := v.(int)
|
|
||||||
resizeReq.PreferredCPU = append(resizeReq.PreferredCPU, int64(cpuNum))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
oldPCPU, newPCPU := d.GetChange("preferred_cpu")
|
|
||||||
if len(oldPCPU.([]interface{})) != 0 && len(newPCPU.([]interface{})) == 0 {
|
|
||||||
resizeReq.PreferredCPU = []int64{-1}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
oldRam, newRam := d.GetChange("ram")
|
|
||||||
if oldRam.(int) != newRam.(int) {
|
|
||||||
resizeReq.RAM = uint64(newRam.(int))
|
|
||||||
doUpdate = true
|
|
||||||
} else {
|
|
||||||
resizeReq.RAM = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if doUpdate {
|
|
||||||
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
|
|
||||||
oldCpu.(int), newCpu.(int),
|
|
||||||
oldRam.(int), newRam.(int))
|
|
||||||
_, err := c.CloudAPI().Compute().Resize(ctx, resizeReq)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
oldSize, newSize := d.GetChange("boot_disk_size")
|
oldSize, newSize := d.GetChange("boot_disk_size")
|
||||||
if oldSize.(int) < newSize.(int) {
|
if oldSize.(int) < newSize.(int) {
|
||||||
req := compute.DiskResizeRequest{ComputeID: computeRec.ID}
|
req := compute.DiskResizeRequest{ComputeID: computeRec.ID}
|
||||||
@@ -957,6 +901,79 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
||||||
|
// If STARTED, we need to stop it before update
|
||||||
|
var isStopRequired bool
|
||||||
|
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu", "hot_resize") && d.Get("started").(bool) {
|
||||||
|
isStopRequired = true
|
||||||
|
}
|
||||||
|
|
||||||
|
old, new := d.GetChange("cpu")
|
||||||
|
if old.(int) > new.(int) && d.Get("started").(bool) && d.Get("force_resize").(bool) {
|
||||||
|
isStopRequired = true
|
||||||
|
}
|
||||||
|
if isStopRequired {
|
||||||
|
if _, err := c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeRec.ID}); err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
doUpdate := false
|
||||||
|
resizeReq := compute.ResizeRequest{
|
||||||
|
ComputeID: computeRec.ID,
|
||||||
|
}
|
||||||
|
forceResize, ok := d.GetOk("force_resize")
|
||||||
|
if ok {
|
||||||
|
resizeReq.Force = forceResize.(bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
warnings := dc.Warnings{}
|
||||||
|
|
||||||
|
oldCpu, newCpu := d.GetChange("cpu")
|
||||||
|
if oldCpu.(int) > newCpu.(int) && !forceResize.(bool) {
|
||||||
|
return diag.Errorf("Cannot resize compute ID %d: enable 'force_resize' to reduce compute vCPUs", computeRec.ID)
|
||||||
|
}
|
||||||
|
if oldCpu.(int) != newCpu.(int) {
|
||||||
|
resizeReq.CPU = uint64(newCpu.(int))
|
||||||
|
doUpdate = true
|
||||||
|
} else {
|
||||||
|
resizeReq.CPU = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if resizeReq.CPU != 0 {
|
||||||
|
if preferredCPU, ok := d.GetOk("preferred_cpu"); ok {
|
||||||
|
preferredList := preferredCPU.([]interface{})
|
||||||
|
if len(preferredList) > 0 {
|
||||||
|
for _, v := range preferredList {
|
||||||
|
cpuNum := v.(int)
|
||||||
|
resizeReq.PreferredCPU = append(resizeReq.PreferredCPU, int64(cpuNum))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
oldPCPU, newPCPU := d.GetChange("preferred_cpu")
|
||||||
|
if len(oldPCPU.([]interface{})) != 0 && len(newPCPU.([]interface{})) == 0 {
|
||||||
|
resizeReq.PreferredCPU = []int64{-1}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
oldRam, newRam := d.GetChange("ram")
|
||||||
|
if oldRam.(int) != newRam.(int) {
|
||||||
|
resizeReq.RAM = uint64(newRam.(int))
|
||||||
|
doUpdate = true
|
||||||
|
} else {
|
||||||
|
resizeReq.RAM = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if doUpdate {
|
||||||
|
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
|
||||||
|
oldCpu.(int), newCpu.(int),
|
||||||
|
oldRam.(int), newRam.(int))
|
||||||
|
_, err := c.CloudAPI().Compute().Resize(ctx, resizeReq)
|
||||||
|
if err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if d.HasChanges("description",
|
if d.HasChanges("description",
|
||||||
"name",
|
"name",
|
||||||
"numa_affinity",
|
"numa_affinity",
|
||||||
@@ -1020,28 +1037,17 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
req.NetworkInterfaceNaming = d.Get("network_interface_naming").(string)
|
req.NetworkInterfaceNaming = d.Get("network_interface_naming").(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
|
||||||
// If STARTED, we need to stop it before update
|
|
||||||
var isStopRequired bool
|
|
||||||
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu") && d.Get("started").(bool) {
|
|
||||||
isStopRequired = true
|
|
||||||
}
|
|
||||||
if isStopRequired {
|
|
||||||
if _, err := c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeRec.ID}); err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// perform update
|
// perform update
|
||||||
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
|
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If used to be STARTED, we need to start it after update
|
}
|
||||||
if isStopRequired {
|
|
||||||
if _, err := c.CloudAPI().Compute().Start(ctx, compute.StartRequest{ComputeID: computeRec.ID}); err != nil {
|
// If used to be STARTED, we need to start it after update
|
||||||
return diag.FromErr(err)
|
if isStopRequired {
|
||||||
}
|
if _, err := c.CloudAPI().Compute().Start(ctx, compute.StartRequest{ComputeID: computeRec.ID}); err != nil {
|
||||||
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1097,15 +1103,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(deletedDisks) > 0 {
|
if len(deletedDisks) > 0 {
|
||||||
stopReq := compute.StopRequest{
|
|
||||||
ComputeID: computeRec.ID,
|
|
||||||
Force: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := c.CloudAPI().Compute().Stop(ctx, stopReq)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, disk := range deletedDisks {
|
for _, disk := range deletedDisks {
|
||||||
diskConv := disk.(map[string]interface{})
|
diskConv := disk.(map[string]interface{})
|
||||||
@@ -1126,14 +1123,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
req := compute.StartRequest{
|
|
||||||
ComputeID: computeRec.ID,
|
|
||||||
AltBootID: 0,
|
|
||||||
}
|
|
||||||
_, err = c.CloudAPI().Compute().Start(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(addedDisks) > 0 {
|
if len(addedDisks) > 0 {
|
||||||
@@ -1206,24 +1196,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("started") {
|
|
||||||
if d.Get("started").(bool) {
|
|
||||||
req := compute.StartRequest{
|
|
||||||
ComputeID: computeRec.ID,
|
|
||||||
}
|
|
||||||
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
req := compute.StopRequest{
|
|
||||||
ComputeID: computeRec.ID,
|
|
||||||
}
|
|
||||||
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.HasChange("affinity_label") {
|
if d.HasChange("affinity_label") {
|
||||||
affinityLabel := d.Get("affinity_label").(string)
|
affinityLabel := d.Get("affinity_label").(string)
|
||||||
if affinityLabel == "" {
|
if affinityLabel == "" {
|
||||||
@@ -1782,32 +1754,6 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
c := m.(*controller.ControllerCfg)
|
c := m.(*controller.ControllerCfg)
|
||||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||||
|
|
||||||
if start, ok := d.GetOk("started"); ok {
|
|
||||||
if start.(bool) {
|
|
||||||
req := compute.StopRequest{ComputeID: computeId}
|
|
||||||
log.Debugf("resourceComputeDelete: stoping Compute ID %d", computeId)
|
|
||||||
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
|
|
||||||
diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pciList, ok := d.GetOk("pci_devices")
|
|
||||||
|
|
||||||
if d.Get("permanently").(bool) && ok {
|
|
||||||
pciDevices := pciList.(*schema.Set).List()
|
|
||||||
for _, v := range pciDevices {
|
|
||||||
pciID := v.(int)
|
|
||||||
req := compute.DetachPCIDeviceRequest{
|
|
||||||
ComputeID: computeId,
|
|
||||||
DeviceID: uint64(pciID),
|
|
||||||
}
|
|
||||||
_, err := c.CloudAPI().Compute().DetachPCIDevice(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
req := compute.DeleteRequest{
|
req := compute.DeleteRequest{
|
||||||
ComputeID: computeId,
|
ComputeID: computeId,
|
||||||
Permanently: d.Get("permanently").(bool),
|
Permanently: d.Get("permanently").(bool),
|
||||||
|
|||||||
@@ -251,7 +251,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
|
|
||||||
loaderType, loaderTypeOk := d.GetOk("loader_type")
|
loaderType, loaderTypeOk := d.GetOk("loader_type")
|
||||||
bootType, bootTypeOk := d.GetOk("boot_type")
|
bootType, bootTypeOk := d.GetOk("boot_type")
|
||||||
hotResize, hotResizeOk := d.GetOk("hot_resize")
|
hotResize, hotResizeOk := d.GetOkExists("hot_resize")
|
||||||
networkInterfaceNaming, networkInterfaceNamingOk := d.GetOk("network_interface_naming")
|
networkInterfaceNaming, networkInterfaceNamingOk := d.GetOk("network_interface_naming")
|
||||||
|
|
||||||
if loaderTypeOk {
|
if loaderTypeOk {
|
||||||
@@ -875,33 +875,6 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
c := m.(*controller.ControllerCfg)
|
c := m.(*controller.ControllerCfg)
|
||||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||||
|
|
||||||
if start, ok := d.GetOk("started"); ok {
|
|
||||||
if start.(bool) {
|
|
||||||
req := compute.StopRequest{ComputeID: computeId}
|
|
||||||
log.Debugf("resourceComputeDelete: stoping Compute ID %d", computeId)
|
|
||||||
if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
|
|
||||||
diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pciList, ok := d.GetOk("pci_devices")
|
|
||||||
|
|
||||||
if d.Get("permanently").(bool) && ok {
|
|
||||||
pciDevices := pciList.(*schema.Set).List()
|
|
||||||
for _, v := range pciDevices {
|
|
||||||
pciID := v.(int)
|
|
||||||
req := compute.DetachPCIDeviceRequest{
|
|
||||||
ComputeID: computeId,
|
|
||||||
DeviceID: uint64(pciID),
|
|
||||||
}
|
|
||||||
_, err := c.CloudBroker().Compute().DetachPciDevice(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return diag.FromErr(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
req := compute.DeleteRequest{
|
req := compute.DeleteRequest{
|
||||||
ComputeID: computeId,
|
ComputeID: computeId,
|
||||||
Permanently: d.Get("permanently").(bool),
|
Permanently: d.Get("permanently").(bool),
|
||||||
|
|||||||
@@ -111,6 +111,22 @@ func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
c := m.(*controller.ControllerCfg)
|
c := m.(*controller.ControllerCfg)
|
||||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||||
|
|
||||||
|
var isStopRequired bool
|
||||||
|
|
||||||
|
old, new := d.GetChange("cpu")
|
||||||
|
if d.Get("started").(bool) && (old.(int) > new.(int)) && d.Get("force_resize").(bool) {
|
||||||
|
isStopRequired = true
|
||||||
|
}
|
||||||
|
if isStopRequired {
|
||||||
|
stopReq := compute.StopRequest{
|
||||||
|
ComputeID: computeId,
|
||||||
|
Force: false,
|
||||||
|
}
|
||||||
|
if _, err := c.CloudBroker().Compute().Stop(ctx, stopReq); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
resizeReq := compute.ResizeRequest{
|
resizeReq := compute.ResizeRequest{
|
||||||
ComputeID: computeId,
|
ComputeID: computeId,
|
||||||
}
|
}
|
||||||
@@ -166,6 +182,12 @@ func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if isStopRequired {
|
||||||
|
if _, err := c.CloudBroker().Compute().Start(ctx, compute.StartRequest{ComputeID: computeId}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -259,15 +281,6 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(deletedDisks) > 0 {
|
if len(deletedDisks) > 0 {
|
||||||
stopReq := compute.StopRequest{
|
|
||||||
ComputeID: computeId,
|
|
||||||
Force: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, disk := range deletedDisks {
|
for _, disk := range deletedDisks {
|
||||||
diskConv := disk.(map[string]interface{})
|
diskConv := disk.(map[string]interface{})
|
||||||
@@ -286,14 +299,7 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
req := compute.StartRequest{
|
|
||||||
ComputeID: computeId,
|
|
||||||
AltBootID: 0,
|
|
||||||
}
|
|
||||||
_, err = c.CloudBroker().Compute().Start(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(addedDisks) > 0 {
|
if len(addedDisks) > 0 {
|
||||||
@@ -905,7 +911,7 @@ func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
||||||
// If STARTED, we need to stop it before update
|
// If STARTED, we need to stop it before update
|
||||||
var isStopRequired bool
|
var isStopRequired bool
|
||||||
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu") && d.Get("started").(bool) {
|
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu", "hot_resize") && d.Get("started").(bool) {
|
||||||
isStopRequired = true
|
isStopRequired = true
|
||||||
}
|
}
|
||||||
if isStopRequired {
|
if isStopRequired {
|
||||||
|
|||||||
Reference in New Issue
Block a user