2023-12-19 16:37:50 +03:00
parent 20050bc169
commit f49d9f8860
150 changed files with 12582 additions and 11709 deletions

View File

@@ -54,6 +54,7 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool {
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
compute, err := utilityDataComputeCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(int(compute.ID)))
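The recurring change across this commit's data sources is clearing the resource ID before returning the lookup error. A minimal sketch of the pattern, with hypothetical names (exampleRecord, utilityExampleCheckPresence) used purely for illustration:
package kvmvm // assumption: same package as the data sources in this commit

import (
	"context"
	"fmt"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleRecord and utilityExampleCheckPresence are hypothetical stand-ins for an SDK record
// and for the presence-check helpers used by the real data sources.
type exampleRecord struct{ ID uint64 }

func utilityExampleCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*exampleRecord, error) {
	return nil, fmt.Errorf("compute not found")
}

func dataSourceExampleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rec, err := utilityExampleCheckPresence(ctx, d, m)
	if err != nil {
		// Clearing the ID drops the stale entry instead of leaving an unreadable data source in state.
		d.SetId("")
		return diag.FromErr(err)
	}
	d.SetId(strconv.Itoa(int(rec.ID)))
	return nil
}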
@@ -290,6 +291,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"image_name": {
Type: schema.TypeString,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
@@ -444,6 +449,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"flip_group_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeAudits, err := utilityComputeAuditsCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeGetAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeAudits, err := utilityComputeGetAuditsCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -45,12 +45,13 @@ import (
func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
result := strings.ReplaceAll(string(computeConsoleUrl), "\"", "")
result = strings.ReplaceAll(string(result), "\\", "")
result := strings.ReplaceAll(computeConsoleUrl, "\"", "")
result = strings.ReplaceAll(result, "\\", "")
d.Set("console_url", result)
return nil
}
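The hunk above removes the redundant string conversions around computeConsoleUrl. As a side note, the two ReplaceAll calls could also be collapsed into one replacer; a sketch assuming the SDK already returns the URL as a plain string (uses only the standard strings package):
// cleanConsoleURL is a hypothetical helper, not part of the provider: it strips the JSON
// quotes and escape backslashes from the raw console URL in one pass.
func cleanConsoleURL(raw string) string {
	return strings.NewReplacer(`"`, "", `\`, "").Replace(raw)
}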

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeGetLogRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeGetLog, err := utilityComputeGetLogCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeList, err := utilityDataComputeListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeList, err := utilityDataComputeListDeletedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputePCIDeviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computePCIDeviceList, err := utilityComputePCIDeviceListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@@ -80,13 +81,13 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
Description: "Find by status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputePfwListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computePfwList, err := utilityComputePfwListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -12,6 +12,7 @@ import (
func dataSourceComputeSnapshotUsageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeSnapshotUsage, err := utilityComputeSnapshotUsageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeUserList, err := utilityComputeUserListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -44,6 +44,7 @@ import (
func dataSourceComputeVGPUListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
computeVGPUList, err := utilityComputeVGPUListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@@ -75,18 +76,18 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
Description: "Find by status",
},
"includedeleted": {
Type: schema.TypeBool,
Optional: true,
Type: schema.TypeBool,
Optional: true,
Description: "Include deleted computes. If using field 'status', then includedeleted will be ignored",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
@@ -94,7 +95,6 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"entry_count": {

View File

@@ -72,6 +72,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
"conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress,
@@ -362,7 +363,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("stateless_sep_id", computeRec.StatelessSepID)
d.Set("stateless_sep_type", computeRec.StatelessSepType)
d.Set("status", computeRec.Status)
d.Set("tags", flattenTags(computeRec.Tags))
// d.Set("tags", flattenTags(computeRec.Tags))
d.Set("tech_status", computeRec.TechStatus)
d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime)
@@ -554,6 +555,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("guid", computeRec.GUID)
d.Set("compute_id", computeRec.ID)
d.Set("image_id", computeRec.ImageID)
d.Set("image_name", computeRec.ImageName)
d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
d.Set("lock_status", computeRec.LockStatus)
d.Set("manager_id", computeRec.ManagerID)
@@ -653,19 +655,19 @@ func flattenSnapshotUsage(computeSnapshotUsages compute.ListUsageSnapshots) []ma
return res
}
func flattenSnapshotList(computeSnapshotUsages *compute.ListSnapShots) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeSnapshotUsages.Data))
for _, computeUsage := range computeSnapshotUsages.Data {
temp := map[string]interface{}{
"disks": computeUsage.Disks,
"guid": computeUsage.GUID,
"label": computeUsage.Label,
"timestamp": computeUsage.Timestamp,
}
res = append(res, temp)
}
return res
}
// func flattenSnapshotList(computeSnapshotUsages *compute.ListSnapShots) []map[string]interface{} {
// res := make([]map[string]interface{}, 0, len(computeSnapshotUsages.Data))
// for _, computeUsage := range computeSnapshotUsages.Data {
// temp := map[string]interface{}{
// "disks": computeUsage.Disks,
// "guid": computeUsage.GUID,
// "label": computeUsage.Label,
// "timestamp": computeUsage.Timestamp,
// }
// res = append(res, temp)
// }
// return res
// }
func flattenVGPU(m []interface{}) []string {
output := []string{}
@@ -703,4 +705,4 @@ func flattenPCIDevice(m []interface{}) []string {
}
}
return output
}
}
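For orientation, the flatten helpers in this file all follow one shape: turn an SDK list into the []map[string]interface{} that a schema.TypeList of maps expects from d.Set. A minimal sketch with a hypothetical item type:
// exampleItem is a hypothetical stand-in for an SDK record; the real helpers above work on
// types such as compute.ListInterfaces and compute.ListUsageSnapshots.
type exampleItem struct {
	GUID  string
	Label string
}

// flattenExampleItems maps each record onto the generic map shape consumed by d.Set.
func flattenExampleItems(items []exampleItem) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(items))
	for _, item := range items {
		res = append(res, map[string]interface{}{
			"guid":  item.GUID,
			"label": item.Label,
		})
	}
	return res
}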

View File

@@ -155,6 +155,32 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
if networks.(*schema.Set).Len() > 0 {
ns := networks.(*schema.Set).List()
interfaces := make([]kvmppc.Interface, 0)
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
reqInterface := kvmppc.Interface{
NetType: netInterfaceVal["net_type"].(string),
NetID: uint64(netInterfaceVal["net_id"].(int)),
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
}
interfaces = append(interfaces, reqInterface)
}
createReqPPC.Interfaces = interfaces
}
}
argVal, ok = d.GetOk("cloud_init")
if ok {
userdata := argVal.(string)
@@ -242,33 +268,79 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
// Nota bene: we created the compute in a STOPPED state (this is required to properly attach the 1st network interface),
// now we need to start it before we report the sequence complete
if d.Get("started").(bool) {
req := compute.StartRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
warnings.Add(err)
}
}
if disks, ok := d.GetOk("disks"); ok {
log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
addedDisks := disks.([]interface{})
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
req := compute.DiskAddRequest{
ComputeID: computeId,
DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)),
}
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
req := compute.EnableRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
if _, err := c.CloudAPI().Compute().Enable(ctx, req); err != nil {
warnings.Add(err)
}
} else {
req := compute.DisableRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
if _, err := c.CloudAPI().Compute().Disable(ctx, req); err != nil {
warnings.Add(err)
if diskConv["sep_id"].(int) != 0 {
req.SepID = uint64(diskConv["sep_id"].(int))
}
if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string)
}
if diskConv["pool"].(string) != "" {
req.Pool = diskConv["pool"].(string)
}
if diskConv["desc"].(string) != "" {
req.Description = diskConv["desc"].(string)
}
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
}
}
}
if !cleanup {
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
req := compute.EnableRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
if _, err := c.CloudAPI().Compute().Enable(ctx, req); err != nil {
warnings.Add(err)
}
} else {
req := compute.DisableRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId)
if _, err := c.CloudAPI().Compute().Disable(ctx, req); err != nil {
warnings.Add(err)
}
}
}
// Nota bene: we created the compute in a STOPPED state (this is required to properly attach the 1st network interface),
// now we need to start it before we report the sequence complete
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StartRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId)
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
warnings.Add(err)
}
}
if !start.(bool) {
req := compute.StopRequest{ComputeID: computeId}
log.Debugf("resourceComputeCreate: stoping Compute ID %d after completing its resource configuration", computeId)
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
warnings.Add(err)
}
}
}
if affinityLabel, ok := d.GetOk("affinity_label"); ok {
req := compute.AffinityLabelSetRequest{
ComputeID: computeId,
@@ -281,41 +353,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if disks, ok := d.GetOk("disks"); ok {
log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
addedDisks := disks.([]interface{})
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
req := compute.DiskAddRequest{
ComputeID: computeId,
DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)),
SepID: uint64(diskConv["sep_id"].(int)),
}
if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string)
}
if diskConv["pool"].(string) != "" {
req.Pool = diskConv["pool"].(string)
}
if diskConv["desc"].(string) != "" {
req.Description = diskConv["desc"].(string)
}
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
}
}
}
if ars, ok := d.GetOk("affinity_rules"); ok {
log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", computeId)
addedAR := ars.([]interface{})
@@ -361,133 +398,134 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
}
if tags, ok := d.GetOk("tags"); ok {
log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId)
addedTags := tags.(*schema.Set).List()
if len(addedTags) > 0 {
for _, tagInterface := range addedTags {
tagItem := tagInterface.(map[string]interface{})
req := compute.TagAddRequest{
if tags, ok := d.GetOk("tags"); ok {
log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId)
addedTags := tags.(*schema.Set).List()
if len(addedTags) > 0 {
for _, tagInterface := range addedTags {
tagItem := tagInterface.(map[string]interface{})
req := compute.TagAddRequest{
ComputeID: computeId,
Key: tagItem["key"].(string),
Value: tagItem["value"].(string),
}
_, err := c.CloudAPI().Compute().TagAdd(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if pfws, ok := d.GetOk("port_forwarding"); ok {
log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId)
addedPfws := pfws.(*schema.Set).List()
if len(addedPfws) > 0 {
for _, pfwInterface := range addedPfws {
pfwItem := pfwInterface.(map[string]interface{})
req := compute.PFWAddRequest{
ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
}
if pfwItem["public_port_end"].(int) != 0 {
req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
}
_, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if userAcess, ok := d.GetOk("user_access"); ok {
log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId)
usersAcess := userAcess.(*schema.Set).List()
if len(usersAcess) > 0 {
for _, userAcessInterface := range usersAcess {
userAccessItem := userAcessInterface.(map[string]interface{})
req := compute.UserGrantRequest{
ComputeID: computeId,
Username: userAccessItem["username"].(string),
AccessType: userAccessItem["access_type"].(string),
}
_, err := c.CloudAPI().Compute().UserGrant(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if snapshotList, ok := d.GetOk("snapshot"); ok {
log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId)
snapshots := snapshotList.(*schema.Set).List()
if len(snapshots) > 0 {
for _, snapshotInterface := range snapshots {
snapshotItem := snapshotInterface.(map[string]interface{})
req := compute.SnapshotCreateRequest{
ComputeID: computeId,
Label: snapshotItem["label"].(string),
}
_, err := c.CloudAPI().Compute().SnapshotCreate(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if cdtList, ok := d.GetOk("cd"); ok {
log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId)
cds := cdtList.(*schema.Set).List()
if len(cds) > 0 {
snapshotItem := cds[0].(map[string]interface{})
req := compute.CDInsertRequest{
ComputeID: computeId,
Key: tagItem["key"].(string),
Value: tagItem["value"].(string),
CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
}
_, err := c.CloudAPI().Compute().TagAdd(ctx, req)
_, err := c.CloudAPI().Compute().CDInsert(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if pfws, ok := d.GetOk("port_forwarding"); ok {
log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId)
addedPfws := pfws.(*schema.Set).List()
if len(addedPfws) > 0 {
for _, pfwInterface := range addedPfws {
pfwItem := pfwInterface.(map[string]interface{})
req := compute.PFWAddRequest{
ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
PublicPortEnd: int64(pfwItem["public_port_end"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
}
_, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if userAcess, ok := d.GetOk("user_access"); ok {
log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId)
usersAcess := userAcess.(*schema.Set).List()
if len(usersAcess) > 0 {
for _, userAcessInterface := range usersAcess {
userAccessItem := userAcessInterface.(map[string]interface{})
req := compute.UserGrantRequest{
ComputeID: computeId,
Username: userAccessItem["username"].(string),
AccessType: userAccessItem["access_type"].(string),
}
_, err := c.CloudAPI().Compute().UserGrant(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if snapshotList, ok := d.GetOk("snapshot"); ok {
log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId)
snapshots := snapshotList.(*schema.Set).List()
if len(snapshots) > 0 {
for _, snapshotInterface := range snapshots {
snapshotItem := snapshotInterface.(map[string]interface{})
req := compute.SnapshotCreateRequest{
ComputeID: computeId,
Label: snapshotItem["label"].(string),
}
_, err := c.CloudAPI().Compute().SnapshotCreate(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
if cdtList, ok := d.GetOk("cd"); ok {
log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId)
cds := cdtList.(*schema.Set).List()
if len(cds) > 0 {
snapshotItem := cds[0].(map[string]interface{})
req := compute.CDInsertRequest{
if d.Get("pin_to_stack").(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
}
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
if err != nil {
warnings.Add(err)
}
}
_, err := c.CloudAPI().Compute().CDInsert(ctx, req)
if d.Get("pause").(bool) {
req := compute.PauseRequest{
ComputeID: computeId,
}
_, err := c.CloudAPI().Compute().Pause(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
if d.Get("pin_to_stack").(bool) == true {
req := compute.PinToStackRequest{
ComputeID: computeId,
}
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
if err != nil {
warnings.Add(err)
}
}
if d.Get("pause").(bool) == true {
req := compute.PauseRequest{
ComputeID: computeId,
}
_, err := c.CloudAPI().Compute().Pause(ctx, req)
if err != nil {
warnings.Add(err)
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
// Compute read function will also update resource ID on success, so that Terraform
// will know the resource exists
defer resourceComputeRead(ctx, d, m)
return warnings.Get()
return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)
}
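The switch from a deferred resourceComputeRead call to appending its result matters because a deferred call runs after the return value has already been evaluated, so its diagnostics are silently dropped. A small sketch (readResource is a hypothetical stub, not provider code):
import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

// readResource stands in for resourceComputeRead and always reports one warning.
func readResource() diag.Diagnostics {
	return diag.Diagnostics{{Severity: diag.Warning, Summary: "read produced a warning"}}
}

func createThenRead() diag.Diagnostics {
	var warnings diag.Diagnostics
	// defer readResource()                   // the returned diagnostics would never reach Terraform
	return append(warnings, readResource()...) // the diagnostics are propagated to the caller
}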
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -498,6 +536,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
computeRec, err := utilityComputeCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -601,6 +640,76 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
hasChanged := false
// check compute statuses
switch computeRec.Status {
case status.Deleted:
if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID}
_, err := c.CloudAPI().Compute().Restore(ctx, restoreReq)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
if enabled, ok := d.GetOk("enabled"); ok {
if enabled.(bool) {
enableReq := compute.EnableRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
}
if !enabled.(bool) {
enableReq := compute.DisableRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Disable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
}
}
if start, ok := d.GetOk("started"); ok {
if start.(bool) {
req := compute.StartRequest{ComputeID: computeRec.ID}
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
return diag.FromErr(err)
}
}
if !start.(bool) {
req := compute.StopRequest{ComputeID: computeRec.ID}
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
return diag.FromErr(err)
}
}
}
hasChanged = true
case status.Destroyed:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceComputeCreate(ctx, d, m)
case status.Disabled:
log.Debugf("The compute is in status: %s, may troubles can be occured with update. Please, enable compute first.", computeRec.Status)
case status.Redeploying:
case status.Deleting:
case status.Destroying:
return diag.Errorf("The compute is in progress with status: %s", computeRec.Status)
case status.Modeled:
return diag.Errorf("The compute is in status: %s, please, contant the support for more information", computeRec.Status)
}
if hasChanged {
computeRec, err = utilityComputeCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled") {
enabled := d.Get("enabled").(bool)
if enabled {
@@ -623,34 +732,22 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled)
}
// check compute statuses
switch computeRec.Status {
case status.Deleted:
restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID}
enableReq := compute.EnableRequest{ComputeID: computeRec.ID}
if d.HasChange("started") {
start := d.Get("started").(bool)
if start {
req := compute.StartRequest{ComputeID: computeRec.ID}
_, err := c.CloudAPI().Compute().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
if _, err := c.CloudAPI().Compute().Start(ctx, req); err != nil {
return diag.FromErr(err)
}
}
if !start {
req := compute.StopRequest{ComputeID: computeRec.ID}
_, err = c.CloudAPI().Compute().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
if _, err := c.CloudAPI().Compute().Stop(ctx, req); err != nil {
return diag.FromErr(err)
}
}
case status.Destroyed:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceComputeCreate(ctx, d, m)
case status.Disabled:
log.Debugf("The compute is in status: %s, may troubles can be occured with update. Please, enable compute first.", computeRec.Status)
case status.Redeploying:
case status.Deleting:
case status.Destroying:
return diag.Errorf("The compute is in progress with status: %s", computeRec.Status)
case status.Modeled:
return diag.Errorf("The compute is in status: %s, please, contant the support for more information", computeRec.Status)
}
doUpdate := false
@@ -729,14 +826,16 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("description") || d.HasChange("name") {
if d.HasChanges("description", "name") {
req := compute.UpdateRequest{
ComputeID: computeRec.ID,
Name: d.Get("name").(string),
}
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("description") {
req.Description = d.Get("description").(string)
}
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
@@ -835,9 +934,11 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
ComputeID: computeRec.ID,
DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)),
SepID: uint64(diskConv["sep_id"].(int)),
}
if diskConv["sep_id"].(int) != 0 {
req.SepID = uint64(diskConv["sep_id"].(int))
}
if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string)
}
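The change above (here and in the create path) sets SepID only when the schema value is non-zero, matching how the other optional disk fields are handled, presumably so the API does not receive a zero SEP ID when the field is unset. A compact sketch of the idea with a hypothetical request type:
// exampleDiskAddRequest mirrors the optional fields of compute.DiskAddRequest for illustration only.
type exampleDiskAddRequest struct {
	SepID    uint64
	DiskType string
	Pool     string
}

// fillOptional copies only the values the user actually set, leaving API defaults otherwise.
func fillOptional(req *exampleDiskAddRequest, sepID int, diskType, pool string) {
	if sepID != 0 {
		req.SepID = uint64(sepID)
	}
	if diskType != "" {
		req.DiskType = diskType
	}
	if pool != "" {
		req.Pool = pool
	}
}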
@@ -1267,7 +1368,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("pin_to_stack") {
oldPin, newPin := d.GetChange("pin_to_stack")
if oldPin.(bool) == true && newPin.(bool) == false {
if !newPin.(bool) {
req := compute.UnpinFromStackRequest{
ComputeID: computeRec.ID,
}
@@ -1277,7 +1378,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
}
if oldPin.(bool) == false && newPin.(bool) == true {
if !oldPin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeRec.ID,
}
@@ -1291,7 +1392,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("pause") {
oldPause, newPause := d.GetChange("pause")
if oldPause.(bool) == true && newPause.(bool) == false {
if !newPause.(bool) {
req := compute.ResumeRequest{
ComputeID: computeRec.ID,
}
@@ -1300,7 +1401,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
}
if oldPause.(bool) == false && newPause.(bool) == true {
if !oldPause.(bool) {
req := compute.PauseRequest{
ComputeID: computeRec.ID,
}
@@ -1313,8 +1414,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
if d.HasChange("reset") {
oldReset, newReset := d.GetChange("reset")
if oldReset.(bool) == false && newReset.(bool) == true {
_, newReset := d.GetChange("reset")
if newReset.(bool) {
req := compute.ResetRequest{
ComputeID: computeRec.ID,
}
@@ -1394,8 +1495,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
// we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
defer resourceComputeRead(ctx, d, m)
return warnings.Get()
return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)
}
func isChangeDisk(els []interface{}, el interface{}) bool {
@@ -1456,6 +1557,8 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
d.SetId("")
return nil
}
@@ -1504,7 +1607,6 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
},
"permanently": {
Type: schema.TypeBool,
Computed: true,
Optional: true,
Description: "Disk deletion status",
},
@@ -1877,6 +1979,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"auto_start": {
Type: schema.TypeBool,
Optional: true,

View File

@@ -34,18 +34,17 @@ package kvmvm
import (
"context"
"regexp"
"strconv"
log "github.com/sirupsen/logrus"
"regexp"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func matchComputes(computeList *compute.ListComputes) *compute.ListComputes {
matched, _ := regexp.Compile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+")
matched, _ := regexp.Compile(`[a-zA-Z]+\d+-[a-zA-Z]+\d+-[a-zA-Z]+\d+`)
result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
res := matched.Match([]byte(ic.Name))
return !res
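One escaping detail when moving this pattern to a raw string literal: the doubled backslash belongs only to interpreted string syntax. A quick reference sketch:
// Both forms compile to the same regexp token \d; in a raw (backquoted) literal the backslash
// must not be doubled, or the pattern would instead match a literal backslash followed by 'd'.
var (
	interpretedForm = regexp.MustCompile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+")
	rawForm         = regexp.MustCompile(`[a-zA-Z]+\d+-[a-zA-Z]+\d+-[a-zA-Z]+\d+`)
)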
@@ -176,17 +175,6 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
old_set, new_set := d.GetChange("network")
req := compute.StopRequest{
ComputeID: computeID,
Force: true,
}
log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID)
_, err := c.CloudAPI().Compute().Stop(ctx, req)
if err != nil {
return err
}
apiErrCount := 0
var lastSavedError error
@@ -246,6 +234,17 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || old_set.(*schema.Set).Len() < 1 {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if err := utilityComputeStop(ctx, computeId, m); err != nil {
apiErrCount++
lastSavedError = err
}
needStart = true
}
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, runner := range attach_set.List() {
@@ -270,13 +269,12 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
startReq := compute.StartRequest{ComputeID: computeID}
log.Debugf("utilityComputeNetworksConfigure: starting compute %d", computeID)
_, err = c.CloudAPI().Compute().Start(ctx, startReq)
if err != nil {
apiErrCount++
lastSavedError = err
if needStart {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if numErr, err := utilityComputeStart(ctx, computeId, m); err != nil {
apiErrCount += numErr
lastSavedError = err
}
}
if apiErrCount > 0 {
@@ -302,3 +300,30 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
return *computeRecord, nil
}
func utilityComputeStop(ctx context.Context, computeID uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := compute.StopRequest{
ComputeID: computeID,
Force: true,
}
log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID)
_, err := c.CloudAPI().Compute().Stop(ctx, req)
if err != nil {
return err
}
return nil
}
func utilityComputeStart(ctx context.Context, computeID uint64, m interface{}) (int, error) {
c := m.(*controller.ControllerCfg)
startReq := compute.StartRequest{ComputeID: computeID}
log.Debugf("utilityComputeNetworksConfigure: starting compute %d", computeID)
_, err := c.CloudAPI().Compute().Start(ctx, startReq)
if err != nil {
return 1, err
}
return 0, nil
}
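A usage sketch showing how the two new helpers pair up, wrapped in a hypothetical function (needRestart and the interface reconfiguration step are placeholders, not provider code):
// reconfigureWithRestart stops the compute only when the change requires it (e.g. attaching the
// first interface), performs the reconfiguration, then starts the compute again. It returns an
// API error count and the last error, following the helpers' conventions above.
func reconfigureWithRestart(ctx context.Context, computeID uint64, m interface{}, needRestart bool) (int, error) {
	if !needRestart {
		return 0, nil
	}
	if err := utilityComputeStop(ctx, computeID, m); err != nil {
		return 1, err
	}
	// ... detach and attach network interfaces here ...
	return utilityComputeStart(ctx, computeID, m)
}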