Rework and implementation of extended compute resource management

rc-1.0
Sergey Shubin svs1370 4 years ago
parent 422658542c
commit 7031f0fe2d

@@ -32,3 +32,6 @@ const MaxSshKeysPerCompute=12
// MaxExtraDisksPerCompute sets maximum number of extra disks that can be added when creating new compute
const MaxExtraDisksPerCompute=12
// MaxNetworksPerCompute sets maximum number of vNICs per compute
const MaxNetworksPerCompute=8
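These limits are intended to cap the corresponding list arguments of the compute schema via MaxItems (the "networks" entry of resourceCompute() further down in this commit uses MaxNetworksPerCompute exactly that way). A minimal sketch of the same pattern for extra disks, assuming the wiring mirrors the networks entry; the exact extra_disks schema entry is not part of this hunk:

// Sketch only: assumes "extra_disks" is declared like the "networks" list shown later in this commit.
"extra_disks": {
    Type:        schema.TypeList,
    Optional:    true,
    MaxItems:    MaxExtraDisksPerCompute,
    Elem:        &schema.Schema{Type: schema.TypeInt}, // "extra_disks" is a list of disk IDs (ints)
    Description: "Optional list of IDs of the extra disks to attach to this compute.",
},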

@@ -120,7 +120,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
return err
}
log.Debugf("flattenCompute: ID %d, RgID %d", model.ID, model.RgID)
log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("compute_id", model.ID)

@@ -56,7 +56,7 @@ func flattenResgroup(d *schema.ResourceData, rg_facts string) error {
d.Set("account_name", details.AccountName)
d.Set("account_id", details.AccountID)
d.Set("grid_id", details.GridID)
d.Set("desc", details.Desc)
d.Set("description", details.Desc)
d.Set("status", details.Status)
d.Set("def_net_type", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
@@ -119,7 +119,7 @@ func dataSourceResgroup() *schema.Resource {
Description: "Unique ID of the account, which this resource group belongs to. If account ID is specified, then account name is ignored.",
},
"desc": &schema.Schema{
"description": &schema.Schema{
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this resource group.",

@@ -26,7 +26,7 @@ import (
// ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
func diskSubresourceSchema() map[string]*schema.Schema {
func diskSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"name": {
Type: schema.TypeString,

@@ -0,0 +1,55 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package decort
import (
// "encoding/json"
// "fmt"
// "log"
// "net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
// This is subresource of compute resource used when creating/managing compute network connections
func networkSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"net_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Type of the network for this connection, either EXTNET or VINS.",
},
"net_id": &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "ID of the network for this connection.",
},
"ipaddr": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
},
}
return rets
}
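Each element configured against this subresource comes back from the SDK as a map[string]interface{} keyed by the field names above. A condensed sketch of reading one element, assuming it is wired into the compute resource under the "networks" key as shown later in this commit:

// Sketch: iterate the "networks" list and pull the typed values out of each element.
for _, raw := range d.Get("networks").([]interface{}) {
    netData := raw.(map[string]interface{})
    log.Debugf("network element: net_type=%q net_id=%d ipaddr=%q",
        netData["net_type"].(string), netData["net_id"].(int), netData["ipaddr"].(string))
}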

@@ -37,274 +37,153 @@ import (
)
func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
/*
machine := &MachineConfig{
ResGroupID: d.Get("rgid").(int),
Name: d.Get("name").(string),
Cpu: d.Get("cpu").(int),
Ram: d.Get("ram").(int),
ImageID: d.Get("image_id").(int),
Description: d.Get("description").(string),
}
// BootDisk
// DataDisks
// Networks
// PortForwards
// SshKeyData string
log.Printf("resourceComputeCreate: called for VM name %q, ResGroupID %d", machine.Name, machine.ResGroupID)
var subres_list []interface{}
var subres_data map[string]interface{}
var arg_value interface{}
var arg_set bool
// boot disk list is a required argument and has only one element,
// which is of type diskSubresourceSchema
subres_list = d.Get("boot_disk").([]interface{})
subres_data = subres_list[0].(map[string]interface{})
machine.BootDisk.Label = subres_data["label"].(string)
machine.BootDisk.Size = subres_data["size"].(int)
machine.BootDisk.Pool = subres_data["pool"].(string)
machine.BootDisk.Provider = subres_data["provider"].(string)
arg_value, arg_set = d.GetOk("data_disks")
if arg_set {
log.Printf("resourceComputeCreate: calling makeDisksConfig")
machine.DataDisks, _ = makeDisksConfig(arg_value.([]interface{}))
}
// we assume all mandatory parameters required to create a compute instance are properly
// specified - we rely on schema "Required" attributes to let Terraform validate them for us
arg_value, arg_set = d.GetOk("networks")
if arg_set {
log.Printf("resourceComputeCreate: calling makeNetworksConfig")
machine.Networks, _ = makeNetworksConfig(arg_value.([]interface{}))
}
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
// create basic Compute (i.e. without extra disks and network connections - those will be attached
// by subsequent individual API calls).
// creating Compute is a multi-step workflow, which may fail at some step, so we use "partial" feature of Terraform
d.Partial(true)
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("rgId", fmt.Sprintf("%d", d.Get("rg_id").(int)))
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("cpu", fmt.Sprintf("%d", d.Get("cpu").(int)))
urlValues.Add("ram", fmt.Sprintf("%d", d.Get("ram").(int)))
urlValues.Add("imageId", fmt.Sprintf("%d", d.Get("image_id").(int)))
urlValues.Add("bootDisk", fmt.Sprintf("%d", d.Get("boot_disk_size").(int)))
urlValues.Add("netType", "NONE") // at the 1st step create isolated compute
// urlValues.Add("start", "false") // at the 1st step create compute in a stopped state
arg_value, arg_set = d.GetOk("port_forwards")
if arg_set {
log.Printf("resourceComputeCreate: calling makePortforwardsConfig")
machine.PortForwards, _ = makePortforwardsConfig(arg_value.([]interface{}))
argVal, argSet := d.GetOk("description")
if argSet {
urlValues.Add("desc", argVal.(string))
}
arg_value, arg_set = d.GetOk("ssh_keys")
if arg_set {
log.Printf("resourceComputeCreate: calling makeSshKeysConfig")
machine.SshKeys, _ = makeSshKeysConfig(arg_value.([]interface{}))
sshKeysVal, sshKeysSet := d.GetOk("ssh_keys")
if sshKeysSet {
// process SSH Key settings and set API values accordingly
log.Debugf("resourceComputeCreate: calling makeSshKeysArgString to setup SSH keys for guest login(s)")
urlValues.Add("userdata", makeSshKeysArgString(sshKeysVal.([]interface{})))
}
// create basic VM (i.e. without port forwards and ext network connections - those will be done
// by separate API calls)
d.Partial(true)
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("cloudspaceId", fmt.Sprintf("%d", machine.ResGroupID))
urlValues.Add("name", machine.Name)
urlValues.Add("description", machine.Description)
urlValues.Add("vcpus", fmt.Sprintf("%d", machine.Cpu))
urlValues.Add("memory", fmt.Sprintf("%d", machine.Ram))
urlValues.Add("imageId", fmt.Sprintf("%d", machine.ImageID))
urlValues.Add("disksize", fmt.Sprintf("%d", machine.BootDisk.Size))
if len(machine.SshKeys) > 0 {
urlValues.Add("userdata", makeSshKeysArgString(machine.SshKeys))
computeCreateAPI := KvmX86CreateAPI
arch := d.Get("arch").(string)
if arch == "KVM_PPC" {
computeCreateAPI = KvmPPCCreateAPI
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
} else { // note that we do not validate arch value for explicit "KVM_X86" here
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
}
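As the comment notes, the arch value is not validated against an explicit "KVM_X86" here. A schema-level guard could do that declaratively; a sketch only, assuming an "arch" string attribute in resourceCompute() (validation.StringInSlice comes from the helper/validation import that is currently commented out above):

// Sketch: declarative validation of the "arch" attribute; not part of this commit.
"arch": {
    Type:         schema.TypeString,
    Required:     true,
    ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false),
    Description:  "Hardware architecture of this compute instance.", // wording assumed
},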
api_resp, err := controller.decortAPICall("POST", MachineCreateAPI, urlValues)
apiResp, err := controller.decortAPICall("POST", computeCreateAPI, urlValues)
if err != nil {
return err
}
d.SetId(api_resp) // machines/create API plainly returns ID of the new VM on success
machine.ID, _ = strconv.Atoi(api_resp)
// Compute create API returns ID of the new Compute instance on success
d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially
compId, _ := strconv.Atoi(apiResp)
d.SetPartial("name")
d.SetPartial("description")
d.SetPartial("cpu")
d.SetPartial("ram")
d.SetPartial("image_id")
d.SetPartial("boot_disk")
if len(machine.SshKeys) > 0 {
d.SetPartial("boot_disk_size")
if sshKeysSet {
d.SetPartial("ssh_keys")
}
log.Printf("resourceComputeCreate: new VM ID %d, name %q created", machine.ID, machine.Name)
if len(machine.DataDisks) > 0 || len(machine.PortForwards) > 0 {
// for data disk or port forwards provisioning we have to know Tenant ID
// and Grid ID so we call utilityResgroupConfigGet method to populate these
// fields in the machine structure that will be passed to provisionVmDisks or
// provisionVmPortforwards
log.Printf("resourceComputeCreate: calling utilityResgroupConfigGet")
resgroup, err := controller.utilityResgroupConfigGet(machine.ResGroupID)
if err == nil {
machine.TenantID = resgroup.TenantID
machine.GridID = resgroup.GridID
machine.ExtIP = resgroup.ExtIP
log.Printf("resourceComputeCreate: tenant ID %d, GridID %d, ExtIP %q",
machine.TenantID, machine.GridID, machine.ExtIP)
}
}
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %q created", compId, d.Get("name").(string))
//
// Configure data disks
disks_ok := true
if len(machine.DataDisks) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmDisksProvision for disk count %d", len(machine.DataDisks))
if machine.TenantID == 0 {
// if TenantID is still 0 it means that we failed to get Resgroup Facts by
// a previous call to utilityResgroupGetFacts,
// hence we do not have technical ability to provision data disks
disks_ok = false
} else {
// provisionVmDisks accomplishes two steps for each data disk specification
// 1) creates the disks
// 2) attaches them to the VM
err = controller.utilityVmDisksProvision(machine)
// Configure data disks if any
extraDisksOk := true
argVal, argSet = d.GetOk("extra_disks")
if argSet && len(argVal.([]interface{})) > 0 {
// urlValues.Add("desc", argVal.(string))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", len(argVal.([]interface{})))
err = controller.utilityComputeExtraDisksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
disks_ok = false
log.Errorf("resourceComputeCreate: error when attaching extra disks to a new Compute ID %s: %s", compId, err)
extraDisksOk = false
}
}
if extraDisksOk {
d.SetPartial("extra_disks")
}
if disks_ok {
d.SetPartial("data_disks")
}
//
// Configure port forward rules
pfws_ok := true
if len(machine.PortForwards) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmPortforwardsProvision for pfw rules count %d", len(machine.PortForwards))
if machine.ExtIP == "" {
// if ExtIP is still empty it means that we failed to get Resgroup Facts by
// a previous call to utilityResgroupGetFacts,
// hence we do not have technical ability to provision port forwards
pfws_ok = false
} else {
err := controller.utilityVmPortforwardsProvision(machine)
if err != nil {
pfws_ok = false
}
}
}
if pfws_ok {
// there were no errors reported when configuring port forwards
d.SetPartial("port_forwards")
}
//
// Configure external networks
// NOTE: currently only one external network can be attached to each VM, so in the current
// implementation we ignore all but the 1st network definition
nets_ok := true
if len(machine.Networks) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmNetworksProvision for networks count %d", len(machine.Networks))
err := controller.utilityVmNetworksProvision(machine)
// Configure external networks if any
netsOk := true
argVal, argSet = d.GetOk("networks")
if argSet && len(argVal.([]interface{})) > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", len(argVal.([]interface{})))
err = controller.utilityComputeNetworksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
nets_ok = false
log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
netsOk = false
}
}
if nets_ok {
if netsOk {
// there were no errors reported when configuring networks
d.SetPartial("networks")
}
if disks_ok && nets_ok && pfws_ok {
if extraDisksOk && netsOk {
// if there were no errors in setting any of the subresources, we may leave Partial mode
d.Partial(false)
}
*/
// resourceComputeRead will also update resource ID on success, so that Terraform will know
// that resource exists
return resourceComputeRead(d, m)
log.Debugf("resourceComputeCreate: new Compute ID %d, name %q creation sequence complete", compId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
// Compute read function will also update resource ID on success, so that Terraform
// will know the resource exists
return dataSourceComputeRead(d, m)
}
func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
log.Printf("resourceComputeRead: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
log.Debugf("resourceComputeRead: called for Compute name %q, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
if err != nil {
return err
}
// VM was not found
// Compute with such name and RG ID was not found
return nil
}
if err = flattenCompute(d, compFacts); err != nil {
return err
}
log.Printf("resourceComputeRead: after flattenCompute: VM ID %s, VM name %q, ResGroupID %d",
d.Id(), d.Get("name").(string), d.Get("rgid").(int))
// Not all parameters that we may need are returned by the machines/get API
// Continue with further reading of VM subresource parameters:
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
/*
// Obtain information on external networks
urlValues.Add("machineId", d.Id())
body_string, err := controller.decortAPICall("POST", VmExtNetworksListAPI, urlValues)
if err != nil {
return err
}
net_list := ExtNetworksResp{}
err = json.Unmarshal([]byte(body_string), &net_list)
if err != nil {
return err
}
if len(net_list) > 0 {
if err = d.Set("networks", flattenNetworks(net_list)); err != nil {
return err
}
}
*/
/*
// Ext networks flattening is now done inside flattenCompute because it is currently based
// on data read into NICs component by machine/get API call
if err = d.Set("networks", flattenNetworks()); err != nil {
return err
}
*/
//
// Obtain information on port forwards
/*
urlValues.Add("cloudspaceId", fmt.Sprintf("%d", d.Get("rgid")))
urlValues.Add("machineId", d.Id())
pfw_list := PortforwardsResp{}
body_string, err := controller.decortAPICall("POST", PortforwardsListAPI, urlValues)
if err != nil {
return err
}
err = json.Unmarshal([]byte(body_string), &pfw_list)
if err != nil {
return err
}
if len(pfw_list) > 0 {
if err = d.Set("port_forwards", flattenPortforwards(pfw_list)); err != nil {
return err
}
}
*/
log.Debugf("resourceComputeRead: after flattenCompute: Compute ID %s, name %q, RG ID %d",
d.Id(), d.Get("name").(string), d.Get("rg_id").(int))
return nil
}
func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
log.Printf("resourceComputeUpdate: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
log.Debugf("resourceComputeUpdate: called for Compute name %q, RGID %d",
d.Get("name").(string), d.Get("rg_id").(int))
return resourceComputeRead(d, m)
log.Printf("resourceComputeUpdate: NOT IMPLEMENTED YET!")
// we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
return dataSourceComputeRead(d, m)
}
func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this method destroys target Compute instance with flag "permanently", so
// there is no way to restore destroyed Compute
log.Printf("resourceComputeDelete: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
// NOTE: this function destroys target Compute instance "permanently", so
// there is no way to restore it. It also destroys all extra disks
// attached to this compute, so "User, ye be warned!"
log.Debugf("resourceComputeDelete: called for Compute name %q, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
@@ -328,8 +207,8 @@ func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
func resourceComputeExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Printf("resourceComputeExist: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
log.Debugf("resourceComputeExist: called for Compute name %q, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
@@ -417,6 +296,16 @@ func resourceCompute() *schema.Resource {
Description: "Optional list of IDs of the extra disks to attach to this compute.",
},
"networks": {
Type: schema.TypeList,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
Schema: networkSubresourceSchemaMake(),
},
Description: "Optional list of networks to attach this compute to.",
},
"ssh_keys": {
Type: schema.TypeList,
Optional: true,

@@ -150,7 +150,7 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.")
name_old, _ := d.GetChange("name")
if name_old.(string) != name_new.(string) {
do_update := true
do_update = true
url_values.Add("name", name_new.(string))
}
}
@@ -193,12 +193,12 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
}
}
desc_new, desc_set := d.GetOk("desc")
desc_new, desc_set := d.GetOk("description")
if desc_set {
log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.")
desc_old, _ := d.GetChange("desc")
desc_old, _ := d.GetChange("description")
if desc_old.(string) != desc_new.(string) {
do_update := true
do_update = true
url_values.Add("desc", desc_new.(string))
}
}
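The ":=" to "=" change in this hunk is the substance of the fix: with ":=" the assignment declares a new do_update scoped to the inner if-block, shadowing the outer flag that gates the update API call, so that flag never flips. A standalone, runnable illustration of the shadowing behaviour:

package main

import "fmt"

func main() {
    doUpdate := false
    nameChanged := true
    if nameChanged {
        doUpdate := true // ":=" declares a new variable that shadows the outer doUpdate
        _ = doUpdate
    }
    fmt.Println(doUpdate) // prints "false" - the outer flag never changed; plain "=" fixes this
}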
@@ -341,7 +341,7 @@ func resourceResgroup() *schema.Resource {
Description: "Quota settings for this resource group.",
},
"desc": {
"description": {
Type: schema.TypeString,
Optional: true,
Description: "User-defined text description of this resource group.",

@@ -24,6 +24,7 @@ import (
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
/*
func makeSshKeysConfig(arg_list []interface{}) (sshkeys []SshKeyConfig, count int) {
count = len(arg_list)
if count < 1 {
@@ -41,8 +42,12 @@ func makeSshKeysConfig(arg_list []interface{}) (sshkeys []SshKeyConfig, count in
return sshkeys, count
}
*/
func makeSshKeysArgString(arg_list []interface{}) string {
// This function expects arg_list = data.Get("ssh_keys"), where "data" is a populated schema for Compute
// Resource (see func resourceCompute() definition) or Compute Data Source (see func dataSourceCompute())
func makeSshKeysArgString(sshkeys []SshKeyConfig) string {
// Prepare a string with username and public ssh key value in a format recognized by cloud-init utility.
// It is designed to be passed as "userdata" argument of virtual machine create API call.
// The following format is expected:
@@ -55,15 +60,19 @@ func makeSshKeysArgString(sshkeys []SshKeyConfig) string {
- %s\n
shell: /bin/bash`
*/
if len(sshkeys) < 1 {
if len(arg_list) < 1 {
return ""
}
out := `{"users": [`
const UserdataTemplate = `%s{"ssh-authorized-keys": ["%s"], "shell": "%s", "name": "%s"}, `
const out_suffix = `]}`
for _, elem := range sshkeys {
out = fmt.Sprintf(UserdataTemplate, out, elem.SshKey, elem.UserShell, elem.User)
for _, value := range arg_list {
subres_data := value.(map[string]interface{})
out = fmt.Sprintf(UserdataTemplate, out, subres_data["public_key"].(string), subres_data["shell"].(string), subres_data["user"].(string))
}
out = fmt.Sprintf("%s %s", out, out_suffix)
return out
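For a single ssh_keys element the resulting userdata payload looks as follows. This is a usage illustration with hypothetical input values; the keys user/public_key/shell come from the ssh_keys subresource schema, and the loop above leaves a trailing ", " before the closing " ]}":

// Illustration only: hypothetical ssh_keys element.
keys := []interface{}{
    map[string]interface{}{
        "user":       "ubuntu",
        "public_key": "ssh-rsa AAAA... user@host",
        "shell":      "/bin/bash",
    },
}
userdata := makeSshKeysArgString(keys)
// userdata is now:
// {"users": [{"ssh-authorized-keys": ["ssh-rsa AAAA... user@host"], "shell": "/bin/bash", "name": "ubuntu"},  ]}
urlValues.Add("userdata", userdata) // passed as the "userdata" argument of the compute create call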

@@ -35,74 +35,54 @@ import (
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
/*
func (ctrl *ControllerCfg) utilityVmDisksProvision(mcfg *MachineConfig) error {
for index, disk := range mcfg.DataDisks {
urlValues := &url.Values{}
// urlValues.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
urlValues.Add("accountId", fmt.Sprintf("%d", mcfg.TenantID))
urlValues.Add("gid", fmt.Sprintf("%d", mcfg.GridID))
urlValues.Add("name", fmt.Sprintf("%s", disk.Label))
urlValues.Add("description", fmt.Sprintf("Data disk for VM ID %d / VM Name: %s", mcfg.ID, mcfg.Name))
urlValues.Add("size", fmt.Sprintf("%d", disk.Size))
urlValues.Add("type", "D")
// urlValues.Add("iops", )
disk_id_resp, err := ctrl.decortAPICall("POST", DiskCreateAPI, urlValues)
if err != nil {
// failed to create disk - partial resource update
return err
// TODO: implement do_delta logic
func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceData, do_delta bool) error {
// d is filled with data according to computeResource schema, so extra disks config is retrieved via "extra_disks" key
// If do_delta is true, this function will identify changes between new and existing specs for extra disks and try to
// update compute configuration accordingly
argVal, argSet := d.GetOk("extra_disks")
if !argSet || len(argVal.([]interface{})) < 1 {
return nil
}
// disk created - API call returns disk ID as a string - use it to update
// disk ID in the corresponding MachineConfig.DiskConfig record
mcfg.DataDisks[index].ID, err = strconv.Atoi(disk_id_resp)
if err != nil {
// failed to convert disk ID into proper integer value - partial resource update
return err
}
extra_disks_list := argVal.([]interface{}) // "extra_disks" is a list of ints
// now that we have disk created and stored its ID in the mcfg.DataDisks[index].ID
// we can attempt attaching the disk to the VM
urlValues = &url.Values{}
// urlValues.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
urlValues.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
urlValues.Add("diskId", disk_id_resp)
_, err = ctrl.decortAPICall("POST", DiskAttachAPI, urlValues)
for _, disk := range extra_disks_list {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
_, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach disk - partial resource update
// failed to attach extra disk - partial resource update
return err
}
}
return nil
}
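The do_delta branch is still a TODO here. One possible shape for it, sketched under the assumption that a detach API constant exists alongside ComputeDiskAttachAPI (ComputeDiskDetachAPI below is hypothetical), is to diff the old and new extra_disks lists with d.GetChange and issue attach/detach calls only for the difference:

// Hypothetical sketch of do_delta handling; not part of this commit.
oldVal, newVal := d.GetChange("extra_disks")
oldSet := map[int]bool{}
for _, v := range oldVal.([]interface{}) {
    oldSet[v.(int)] = true
}
newSet := map[int]bool{}
for _, v := range newVal.([]interface{}) {
    newSet[v.(int)] = true
}
// detach disks that disappeared from the new spec
for diskId := range oldSet {
    if !newSet[diskId] {
        urlValues := &url.Values{}
        urlValues.Add("computeId", d.Id())
        urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
        if _, err := ctrl.decortAPICall("POST", ComputeDiskDetachAPI, urlValues); err != nil { // ComputeDiskDetachAPI is assumed
            return err
        }
    }
}
// attach disks that are new in the spec
for diskId := range newSet {
    if !oldSet[diskId] {
        urlValues := &url.Values{}
        urlValues.Add("computeId", d.Id())
        urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
        if _, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues); err != nil {
            return err
        }
    }
}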
func (ctrl *ControllerCfg) utilityVmPortforwardsProvision(mcfg *MachineConfig) error {
for _, rule := range mcfg.PortForwards {
urlValues := &url.Values{}
urlValues.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
urlValues.Add("cloudspaceId", fmt.Sprintf("%d", mcfg.ResGroupID))
urlValues.Add("publicIp", mcfg.ExtIP) // this may be obsoleted by Resource group implementation
urlValues.Add("publicPort", fmt.Sprintf("%d", rule.ExtPort))
urlValues.Add("localPort", fmt.Sprintf("%d", rule.IntPort))
urlValues.Add("protocol", rule.Proto)
_, err := ctrl.decortAPICall("POST", PortforwardingCreateAPI, urlValues)
if err != nil {
// failed to create port forward rule - partial resource update
return err
}
}
// TODO: implement do_delta logic
func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceData, do_delta bool) error {
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "networks" key
// If do_delta is true, this function will identify changes between new and existing specs for network and try to
// update compute configuration accordingly
argVal, argSet := d.GetOk("networks")
if !argSet || len(argVal.([]interface{})) < 1 {
return nil
}
}
net_list := argVal.([]interface{}) // "networks" is a list of maps; for keys see func networkSubresourceSchemaMake() definition
func (ctrl *ControllerCfg) utilityVmNetworksProvision(mcfg *MachineConfig) error {
for _, net := range mcfg.Networks {
for _, net := range net_list {
urlValues := &url.Values{}
urlValues.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
urlValues.Add("externalNetworkId", fmt.Sprintf("%d", net.NetworkID))
_, err := ctrl.decortAPICall("POST", AttachExternalNetworkAPI, urlValues)
net_data := net.(map[string]interface{})
urlValues.Add("computeId", fmt.Sprintf("%d", d.Id()))
urlValues.Add("netType", net_data["net_type"].(string))
urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
ipaddr, ipSet := net_data["ipaddr"] // "ipaddr" key is optional; an unset value comes back as an empty string
if ipSet && ipaddr.(string) != "" {
urlValues.Add("ipAddr", ipaddr.(string))
}
_, err := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach network - partial resource update
return err
@@ -111,8 +91,6 @@ func (ctrl *ControllerCfg) utilityVmNetworksProvision(mcfg *MachineConfig) error
return nil
}
*/
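The networks do_delta TODO could follow the same d.GetChange pattern. A naive sketch (detach everything from the old spec, then re-attach the new spec), under the assumption that a ComputeNetDetachAPI constant exists alongside ComputeNetAttachAPI:

// Hypothetical sketch only; ComputeNetDetachAPI is assumed, not part of this commit.
oldNets, newNets := d.GetChange("networks")
for _, raw := range oldNets.([]interface{}) {
    netData := raw.(map[string]interface{})
    urlValues := &url.Values{}
    urlValues.Add("computeId", d.Id())
    urlValues.Add("netType", netData["net_type"].(string))
    urlValues.Add("netId", fmt.Sprintf("%d", netData["net_id"].(int)))
    if _, err := ctrl.decortAPICall("POST", ComputeNetDetachAPI, urlValues); err != nil {
        return err
    }
}
// ...then attach each element of newNets.([]interface{}) exactly as in the loop above.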
func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate Compute by one of the following approaches:
// - if compute_id is specified - locate by compute ID
