Compare commits


6 Commits

Author · SHA1 · Message · Date
Sergey Shubin svs1370 · 4c3e2c1363 · Interim commit of PFW rules implementation. This is very preliminary! · 2021-10-07 00:58:05 +03:00
Sergey Shubin svs1370 · 8058b1c08f · Deprecate ssh_keys argument to decort_kvmvm, as it is replaced by cloud_init · 2021-10-06 11:19:01 +03:00
Sergey Shubin svs1370 · fb83398df9 · Update README · 2021-10-06 10:53:41 +03:00
Sergey Shubin svs1370 · a63a35ca93 · Switch to native Set methods for network definition block, hardcode disk type D · 2021-10-05 17:47:08 +03:00
Sergey Shubin svs1370 · cf669a7a72 · Make sure extra disks (if any) are detached from compute being deleted · 2021-10-05 15:01:09 +03:00
Sergey Shubin svs1370 · 8da3f8d348 · Moving from TypeList to TypeSet for network and extra_disks attributes of compute · 2021-10-04 19:55:03 +03:00
13 changed files with 518 additions and 232 deletions

View File

@@ -1,13 +1,13 @@
# terraform-provider-decort
Terraform provider for Digital Energy Cloud Orchestration Technology (DECORT) platform
NOTE: provider rc-1.20 is designed for DECORT API 3.7.x. For older API versions please use:
NOTE: provider rc-1.30 is designed for DECORT API 3.7.x. For older API versions please use:
- DECORT API 3.6.x versions - provider version rc-1.10
- DECORT API versions prior to 3.6.0 - Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs).
- DECORT API versions prior to 3.6.0 - Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs)
With this provider you can manage Compute instances, disks and resource groups in DECORT platform,
as well as query the platform for information about existing resources. This provider supports
Import operations on pre-existing resources.
With this provider you can manage Compute instances, disks, virtual network segments and resource
groups in DECORT platform, as well as query the platform for information about existing resources.
This provider supports Import operations on pre-existing resources.
See user guide at https://github.com/rudecs/terraform-provider-decort/wiki

View File

@@ -28,6 +28,7 @@ import (
"encoding/json"
"fmt"
// "net/url"
// "strconv"
log "github.com/sirupsen/logrus"
@@ -65,6 +66,8 @@ func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
return result
}
// NOTE: this is a legacy function, which is not used as of rc-1.10
// Use "parseComputeDisksToExtraDisks" instead
func parseComputeDisks(disks []DiskRecord) []interface{} {
// Return value was designed to d.Set("disks",) item of dataSourceCompute schema
// However, this item was excluded from the schema as it is not directly
@@ -80,21 +83,20 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
}
*/
result := make([]interface{}, length)
result := []interface{}{}
if length == 0 {
return result
}
elem := make(map[string]interface{})
for i, value := range disks {
for _, value := range disks {
/*
if value.Type == "B" {
// skip boot disk when parsing the list of disks
continue
}
*/
elem := make(map[string]interface{})
// keys in this map should correspond to the Schema definition
// as returned by dataSourceDiskSchemaMake()
elem["name"] = value.Name
@@ -111,7 +113,8 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
// elem["status"] = value.Status
// elem["tech_status"] = value.TechStatus
elem["compute_id"] = value.ComputeID
result[i] = elem
result = append(result, elem)
}
return result
@@ -149,8 +152,40 @@ func parseBootDiskId(disks []DiskRecord) uint {
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord, pfwVinsID int, pfwRules []map[string]interface{}) []interface{} {
// return value will be used to d.Set("network") item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := []interface{}{}
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by networkSubresourceSchemaMake()
elem["net_id"] = value.NetID
elem["net_type"] = value.NetType
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
if value.NetType == "VINS" && len(pfwRules) > 0 && pfwVinsID == value.NetID {
// we have non-empty port forward rules that seem to be relevant to the current
// network segment - set "pfw_rule" element accordingly
log.Debugf("parseComputeInterfacesToNetworks: setting pfw_rule attributes on network block for ViNS ID %d",
value.NetID)
elem["pfw_rule"] = pfwRules
}
// log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
result = append(result, elem)
}
return result
}
/*
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]interface{} {
// return value will be used to d.Set("network",) item of dataSourceCompute schema
// return value will be used to d.Set("network") item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
@@ -172,7 +207,10 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]int
return result
}
*/
// NOTE: this function is retained for historical purposes and is not used as of rc-1.10
func parseComputeInterfaces(ifaces []InterfaceRecord) []map[string]interface{} {
// return value was designed to d.Set("interfaces",) item of dataSourceCompute schema
// However, this item was excluded from the schema as it is not directly
@@ -211,8 +249,8 @@ func parseComputeInterfaces(ifaces []InterfaceRecord) []map[string]interface{} {
return result
}
func flattenCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that comp_facts string contains response from API compute/get,
func flattenCompute(d *schema.ResourceData, compFacts string, pfwVinsID int, pfwRules []map[string]interface{}) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
@@ -254,7 +292,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
if len(model.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces, pfwVinsID, pfwRules)); err != nil {
return err
}
}
@@ -270,15 +308,23 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
}
func dataSourceComputeRead(d *schema.ResourceData, m interface{}) error {
compFacts, err := utilityComputeCheckPresence(d, m)
compID, compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
// if empty string is returned from utilityComputeCheckPresence then there is no
// such Compute and err tells so - just return it to the calling party
// if empty compFacts is returned from utilityComputeCheckPresence and err=nil,
// it means that there is no such Compute;
// in any other case a non-nil error will be reported
d.SetId("") // ensure ID is empty
return err
}
return flattenCompute(d, compFacts)
vinsID, pfwRules, err := utilityComputePfwGet(compID, m)
if err != nil {
log.Errorf("dataSourceComputeRead: there was error calling utilityComputePfwGet for compute ID %s: %s",
d.Id(), err)
return err
}
return flattenCompute(d, compFacts, vinsID, pfwRules)
}
func dataSourceCompute() *schema.Resource {
@@ -374,7 +420,7 @@ func dataSourceCompute() *schema.Resource {
},
"extra_disks": {
Type: schema.TypeList,
Type: schema.TypeSet,
Computed: true,
MaxItems: MaxExtraDisksPerCompute,
Elem: &schema.Schema {
@@ -395,7 +441,7 @@ func dataSourceCompute() *schema.Resource {
*/
"network": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{

View File

@@ -67,18 +67,6 @@ func flattenDisk(d *schema.ResourceData, disk_facts string) error {
// d.Set("compute_id", model.ComputeID)
d.Set("description", model.Desc)
// d.Set("status", model.Status)
// d.Set("tech_status", model.TechStatus)
/* we do not manage snapshots via Terraform yet (and probably, never will), so
// keep this block commented out for a while
if len(model.Snapshots) > 0 {
log.Debugf("flattenDisk: calling flattenDiskSnapshots")
if err = d.Set("nics", flattenDiskSnapshots(model.Snapshots)); err != nil {
return err
}
}
*/
return nil
}
@@ -137,7 +125,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
Description: "Type of this disk. E.g. D for data disks, B for boot.",
},
"description": {

View File

@@ -44,6 +44,7 @@ func diskSubresourceSchemaMake() map[string]*schema.Schema {
"account_id": {
Type: schema.TypeInt,
Computed: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the account this disk belongs to.",
},

View File

@@ -444,9 +444,17 @@ const AccountsListAPI = "/restmachine/cloudapi/account/list" // returns list of
type AccountsListResp []AccountRecord
//
// structures related to /cloudapi/portforwarding/list API
// structures related to /cloudapi/compute/pfwList API
//
type PfwRecord struct {
// Note that if there are port forwarding rules for a compute, then the compute/pfwList response
// will contain a list that starts with a prefix record (see PfwPrefixRecord) followed by
// one or more rule records (see PfwRuleRecord)
type PfwPrefixRecord struct {
VinsID int `json:"vinsId"`
VinsName string `json:"vinsName"`
}
type PfwRuleRecord struct {
ID int `json:"id"`
LocalIP string `json:"localIp"`
LocalPort int `json:"localPort"`
@@ -458,8 +466,6 @@ type PfwRecord struct {
const ComputePfwListAPI = "/restmachine/cloudapi/compute/pfwList"
type ComputePfwListResp []PfwRecord
const ComputePfwAddAPI = "/restmachine/cloudapi/compute/pfwAdd"
const ComputePfwDelAPI = "/restmachine/cloudapi/compute/pfwDel"
@@ -538,6 +544,8 @@ type VnfRecord struct {
AccountID int `json:"accountId"`
Type string `json:"type"` // "DHCP", "NAT", "GW" etc
Config map[string]interface{} `json:"config"` // NOTE: VNF specs vary by VNF type
Status string `json:"status"`
TechStatus string `json:"techStatus"`
}
type VnfGwConfigRecord struct { // describes GW VNF config structure inside ViNS, as returned by API vins/get
@@ -546,6 +554,14 @@ type VnfGwConfigRecord struct { // describes GW VNF config structure inside ViNS
ExtNetMask int `json:"ext_net_mask"`
DefaultGW string `json:"default_gw"`
}
type NatRuleRecord struct { // describes one NAT rule; a list of such rules is maintained inside VNF NAT Config
}
type VnfNatConfigRecord struct { // describes NAT VNF config structure inside ViNS, as returned by API vins/get
Netmask int `json:"netmask"`
Network string `json:"network"` // just network address, no mask, e.g. "192.168.1.0"
Rules []NatRuleRecord `json:"rules"`
}
type VinsRecord struct { // represents part of the response from API vins/get
ID int `json:"id"`
Name string `json:"name"`

View File

@@ -21,11 +21,15 @@ import (
// "encoding/json"
// "fmt"
"bytes"
"hash/fnv"
log "github.com/sirupsen/logrus"
// "net/url"
"sort"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode"
)
// This is subresource of compute resource used when creating/managing compute network connections
@@ -39,6 +43,71 @@ func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.Reso
return true // suppress difference
}
// This function is based on the original Terraform SerializeResourceForHash found
// in helper/schema/serialize.go
// It skips network subresource attributes that are irrelevant for identifying
// unique network blocks
func networkSubresourceSerialize(output *bytes.Buffer, val interface{}, resource *schema.Resource) {
if val == nil {
return
}
rs := resource.Schema
m := val.(map[string]interface{})
var keys []string
allComputed := true
for k, val := range rs {
if val.Optional || val.Required {
allComputed = false
}
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// explicitly ignore "ip_address" when hashing
if k == "ip_address" {
continue
}
subSchema := rs[k]
// Skip attributes that are not user-provided. Computed attributes
// do not contribute to the hash since their ultimate value cannot
// be known at plan/diff time.
if !allComputed && !(subSchema.Required || subSchema.Optional) {
continue
}
output.WriteString(k)
output.WriteRune(':')
value := m[k]
schema.SerializeValueForHash(output, value, subSchema)
}
}
// HashNetworkSubresource hashes the network subresource of the compute resource. It uses
// the specially designed networkSubresourceSerialize (see above) to make sure hashing
// does not involve attributes that we deem irrelevant to the uniqueness of network
// subresource definitions.
// It is this function that should be specified as SchemaSetFunc when creating a Set
// from network subresource blocks (e.g. in flattenCompute)
//
// This function is based on the original Terraform function HashResource from
// helper/schema/set.go
func HashNetworkSubresource(resource *schema.Resource) schema.SchemaSetFunc {
return func(v interface{}) int {
var serialized bytes.Buffer
networkSubresourceSerialize(&serialized, v, resource)
hs := fnv.New32a()
hs.Write(serialized.Bytes())
return int(hs.Sum32())
}
}
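For illustration, here is a minimal sketch (not part of this changeset) of what HashNetworkSubresource buys us when a Set of network blocks is built by hand; the wiring below is an assumption, as the SDK normally constructs the Set from the schema:

package decort

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// demoNetworkSetHashing shows that blocks differing only in attributes
// excluded from the hash ("ip_address" and Computed fields such as "mac")
// collapse into a single Set element.
func demoNetworkSetHashing() {
	netResource := &schema.Resource{Schema: networkSubresourceSchemaMake()}
	netSet := schema.NewSet(HashNetworkSubresource(netResource), nil)

	netSet.Add(map[string]interface{}{"net_type": "VINS", "net_id": 1234, "ip_address": "10.0.0.5"})
	netSet.Add(map[string]interface{}{"net_type": "VINS", "net_id": 1234, "ip_address": "10.0.0.6"})

	fmt.Println(netSet.Len()) // prints 1: ip_address does not contribute to the hash
}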
func networkSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"net_type": {
@@ -58,8 +127,9 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
"ip_address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and available for use.",
},
"mac": {
@@ -68,6 +138,15 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
Description: "MAC address associated with this connection. MAC address is assigned automatically.",
},
"pfw_rule": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: pfwSubresourceSchemaMake(),
},
Description: "Port forwarding rule to setup for this connection. You may specify several such blocks, one for each rule.",
},
}
return rets
}

decort/pfw_subresource.go (new file, 70 lines)
View File

@@ -0,0 +1,70 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package decort
import (
// "encoding/json"
// "fmt"
// "bytes"
// log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
// This is subresource of network subresource of compute resource used
// when creating/managing port forwarding rules for a compute connected
// to the corresponding network
// It only applies to a ViNS connection AND to a ViNS with an external network connection
func pfwSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"pub_port_start": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "Port number on the external interface. For a ranged rule it set the starting port number.",
},
"pub_port_end": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "End port number on the external interface for a ranged rule. Set it equal to start port for a single port rule.",
},
"local_port": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "Port number on the local interface.",
},
"proto": {
Type: schema.TypeString,
Required: true,
StateFunc: stateFuncToLower,
ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false),
Description: "Protocol for this rule. Could be either tcp or udp.",
},
}
return rets
}
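stateFuncToLower is referenced above but is not part of this changeset; a plausible sketch of it (an assumption - the actual helper lives elsewhere in the provider) would simply normalize the stored value, so that "TCP" in a config and "tcp" in state compare as equal:

// assumes "strings" is imported; sketch of the normalization helper
// expected by the "proto" attribute above
func stateFuncToLower(v interface{}) string {
	return strings.ToLower(v.(string))
}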

View File

@@ -103,7 +103,7 @@ func Provider() *schema.Provider {
"decort_kvmvm": resourceCompute(),
"decort_disk": resourceDisk(),
"decort_vins": resourceVins(),
// "decort_pfw": resourcePfw(),
// "decort_k8s": resourceK8s(),
},
DataSourcesMap: map[string]*schema.Resource{
@@ -113,7 +113,8 @@ func Provider() *schema.Provider {
"decort_image": dataSourceImage(),
"decort_disk": dataSourceDisk(),
"decort_vins": dataSourceVins(),
// "decort_pfw": dataSourcePfw(),
// "decort_k8ci": dataSourceK8ci(),
// "decort_k8s": dataSourceK8s(),
},
ConfigureFunc: providerConfigure,

View File

@@ -25,7 +25,7 @@ Visit https://github.com/rudecs/terraform-provider-decort for full source code p
package decort
import (
// "encoding/json"
"encoding/json"
"fmt"
"net/url"
"strconv"
@@ -75,12 +75,14 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("desc", argVal.(string))
}
/*
sshKeysVal, sshKeysSet := d.GetOk("ssh_keys")
if sshKeysSet {
// process SSH Key settings and set API values accordingly
log.Debugf("resourceComputeCreate: calling makeSshKeysArgString to setup SSH keys for guest login(s)")
urlValues.Add("userdata", makeSshKeysArgString(sshKeysVal.([]interface{})))
}
*/
computeCreateAPI := KvmX86CreateAPI
arch := d.Get("arch").(string)
@@ -114,21 +116,23 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
d.SetPartial("ram")
d.SetPartial("image_id")
d.SetPartial("boot_disk_size")
/*
if sshKeysSet {
d.SetPartial("ssh_keys")
}
*/
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string))
// Configure data disks if any
extraDisksOk := true
argVal, argSet = d.GetOk("extra_disks")
if argSet && len(argVal.([]interface{})) > 0 {
if argSet && argVal.(*schema.Set).Len() > 0 {
// urlValues.Add("desc", argVal.(string))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", len(argVal.([]interface{})))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
err = controller.utilityComputeExtraDisksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching extra disks to a new Compute ID %s: %s", compId, err)
log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %s: %s", compId, err)
extraDisksOk = false
}
}
@@ -139,8 +143,8 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
// Configure external networks if any
netsOk := true
argVal, argSet = d.GetOk("network")
if argSet && len(argVal.([]interface{})) > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", len(argVal.([]interface{})))
if argSet && argVal.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
err = controller.utilityComputeNetworksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
@@ -180,7 +184,7 @@ func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceComputeRead: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
compID, compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
if err != nil {
return err
@@ -189,7 +193,14 @@ func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
return nil
}
if err = flattenCompute(d, compFacts); err != nil {
vinsID, pfwRules, err := utilityComputePfwGet(compID, m)
if err != nil {
log.Errorf("resourceComputeRead: there was error calling utilityComputePfwGet for compute ID %s: %s",
d.Id(), err)
return err
}
if err = flattenCompute(d, compFacts, vinsID, pfwRules); err != nil {
return err
}
@@ -289,23 +300,52 @@ func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this function destroys target Compute instance "permanently", so
// there is no way to restore it. It also destroys all extra disks
// attached to this compute, so "User, ye be warned!"
// there is no way to restore it.
// If the compute being destroyed has extra disks attached, they are
// detached from the compute first
log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
_, compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
// the target Compute does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
controller := m.(*ControllerCfg)
model := ComputeGetResp{}
log.Debugf("resourceComputeDelete: ready to unmarshal string %s", compFacts)
err = json.Unmarshal([]byte(compFacts), &model)
if err == nil && len(model.Disks) > 0 {
// prepare to detach data disks from compute - do it only if compFacts unmarshalled
// properly and the resulting model contains a non-empty Disks list
for _, diskFacts := range model.Disks {
if diskFacts.Type == "B" {
// boot disk is never detached on compute delete
continue
}
log.Debugf("resourceComputeDelete: ready to detach data disk ID %d from compute ID %s", diskFacts.ID, d.Id())
detachParams := &url.Values{}
detachParams.Add("computeId", d.Id())
detachParams.Add("diskId", fmt.Sprintf("%d", diskFacts.ID))
_, err = controller.decortAPICall("POST", ComputeDiskDetachAPI, detachParams)
if err != nil {
// We do not fail compute deletion on data disk detach errors
log.Errorf("resourceComputeDelete: error when detaching Disk ID %d: %s", diskFacts.ID, err)
}
}
}
params := &url.Values{}
params.Add("computeId", d.Id())
params.Add("permanently", "1")
controller := m.(*ControllerCfg)
// TODO: this is for the upcoming API update - params.Add("detachdisks", "1")
_, err = controller.decortAPICall("POST", ComputeDeleteAPI, params)
if err != nil {
return err
@@ -316,10 +356,10 @@ func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
func resourceComputeExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Debugf("resourceComputeExist: called for Compute name %q, RG ID %d",
log.Debugf("resourceComputeExist: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
_, compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
if err != nil {
return false, err
@@ -398,12 +438,12 @@ func resourceCompute() *schema.Resource {
"boot_disk_size": {
Type: schema.TypeInt,
Optional: true,
Description: "This compute instance boot disk size in GB.",
Required: true,
Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
},
"extra_disks": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxExtraDisksPerCompute,
Elem: &schema.Schema{
@@ -413,7 +453,7 @@ func resourceCompute() *schema.Resource {
},
"network": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
@@ -422,6 +462,7 @@ func resourceCompute() *schema.Resource {
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
/*
"ssh_keys": {
Type: schema.TypeList,
Optional: true,
@@ -431,6 +472,7 @@ func resourceCompute() *schema.Resource {
},
Description: "SSH keys to authorize on this compute instance.",
},
*/
"description": {
Type: schema.TypeString,

View File

@@ -46,7 +46,7 @@ func resourceDiskCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("gid", fmt.Sprintf("%d", DefaultGridID)) // we use default Grid ID, which was obtained along with DECORT Controller init
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("size", fmt.Sprintf("%d", d.Get("size").(int)))
urlValues.Add("type", d.Get("type").(string))
urlValues.Add("type", "D") // NOTE: only disks of Data type are managed via plugin
urlValues.Add("sep_id", fmt.Sprintf("%d", d.Get("sep_id").(int)))
urlValues.Add("pool", d.Get("pool").(string))
@@ -128,10 +128,14 @@ func resourceDiskUpdate(d *schema.ResourceData, m interface{}) error {
d.SetPartial("name")
}
/*
NOTE: plugin will manage disks of type "Data" only, and type cannot be changed once disk is created
oldType, newType := d.GetChange("type")
if oldType.(string) != newType.(string) {
return fmt.Errorf("resourceDiskUpdate: Disk ID %s - changing type of existing disk not allowed", d.Id())
}
*/
d.Partial(false)
@@ -233,6 +237,8 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Description: "Size of the disk in GB. Note, that existing disks can only be grown in size.",
},
/* We moved "type" attribute to computed attributes section, as plugin manages disks of only
one type - "D", e.g. data disks.
"type": {
Type: schema.TypeString,
Optional: true,
@@ -241,6 +247,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
Description: "Optional type of this disk. Defaults to D, i.e. data disk. Cannot be changed for existing disks.",
},
*/
"description": {
Type: schema.TypeString,
@@ -262,6 +269,12 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Description: "ID of the image, which this disk was cloned from (if ever cloned).",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
@@ -277,24 +290,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
},
Description: "List of user-created snapshots for this disk."
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current model status of this disk.",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Current technical status of this disk.",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the compute instance where this disk is attached to, or 0 for unattached disk.",
},
*/
}

View File

@@ -146,7 +146,28 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d",
d.Get("name").(string), d.Get("account_id").(int))
do_update := false
/* NOTE: we do not allow changing the following attributes of an existing RG via terraform:
- def_net_type
- ipcidr
- ext_net_id
- ext_ip
The following code fragment checks if any of these have been changed and generates an error.
*/
for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
attr_new, attr_old := d.GetChange("def_net_type")
if attr_new.(string) != attr_old.(string) {
return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr)
}
}
attr_new, attr_old := d.GetChange("ext_net_id")
if attr_new.(int) != attr_old.(int) {
return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id())
}
do_general_update := false // will be true if general RG update is necessary (API rg/update)
controller := m.(*ControllerCfg)
url_values := &url.Values{}
@@ -157,7 +178,7 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.")
name_old, _ := d.GetChange("name")
if name_old.(string) != name_new.(string) {
do_update = true
do_general_update = true
url_values.Add("name", name_new.(string))
}
}
@@ -170,31 +191,31 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
quotarecord_old, _ := makeQuotaRecord(quota_value_old.([]interface{}))
if quotarecord_new.Cpu != quotarecord_old.Cpu {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu)
url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quotarecord_new.Cpu))
}
if quotarecord_new.Disk != quotarecord_old.Disk {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk)
url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quotarecord_new.Disk))
}
if quotarecord_new.Ram != quotarecord_old.Ram { // NB: quota on RAM is stored as float32, in units of MB
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram)
url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecord_new.Ram))
}
if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic)
url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quotarecord_new.ExtTraffic))
}
if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs)
url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quotarecord_new.ExtIPs))
}
@@ -205,12 +226,12 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.")
desc_old, _ := d.GetChange("description")
if desc_old.(string) != desc_new.(string) {
do_update = true
do_general_update = true
url_values.Add("desc", desc_new.(string))
}
}
if do_update {
if do_general_update {
log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG")
_, err := controller.decortAPICall("POST", ResgroupUpdateAPI, url_values)
if err != nil {
@@ -303,7 +324,7 @@ func resourceResgroup() *schema.Resource {
Type: schema.TypeString,
Optional: true,
Default: "PRIVATE",
// ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
@@ -323,13 +344,13 @@ func resourceResgroup() *schema.Resource {
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "ID of the external network, which this resource group will use as default for its computes if def_net_type=PUBLIC",
Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "IP address on the external netowrk to request, if def_net_type=PUBLIC",
Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0",
},
/* commented out, as in this version of provider we use default Grid ID

View File

@@ -33,6 +33,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
@@ -223,6 +224,7 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.",
},
@@ -247,12 +249,14 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the account, which this ViNS belongs to. For ViNS created at account level, resource group ID is 0.",
},
"ext_net_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(0),
Description: "ID of the external network this ViNS is connected to. Pass 0 if no external connection required.",
},

View File

@@ -29,6 +29,7 @@ import (
"fmt"
"net/url"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
@@ -40,32 +41,25 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
// d is filled with data according to computeResource schema, so extra disks config is retrieved via "extra_disks" key
// If do_delta is true, this function will identify changes between new and existing specs for extra disks and try to
// update compute configuration accordingly
// Otherwise it will apply whatever is found in the new set of "extra_disks" right away.
// Primary use of do_delta=false is when calling this function from compute Create handler.
// Note that this function will not abort on API errors, but will continue to configure (attach / detach) other individual
// disks via atomic API calls. However, it will not retry a failed operation on the same disk.
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %b", d.Id(), do_delta)
// NB: as of rc-1.25 "extra_disks" are TypeSet with the elem of TypeInt
old_set, new_set := d.GetChange("extra_disks")
old_disks := make([]interface{},0,0)
if old_set != nil {
old_disks = old_set.([]interface{})
}
new_disks := make([]interface{},0,0)
if new_set != nil {
new_disks = new_set.([]interface{})
}
apiErrCount := 0
var lastSavedError error
if !do_delta {
if len(new_disks) < 1 {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, disk := range new_disks {
for _, disk := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
@@ -86,58 +80,31 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
return nil
}
var attach_list, detach_list []int
match := false
for _, oDisk := range old_disks {
match = false
for _, nDisk := range new_disks {
if oDisk.(int) == nDisk.(int) {
match = true
break
}
}
if !match {
detach_list = append(detach_list, oDisk.(int))
}
}
log.Debugf("utilityComputeExtraDisksConfigure: detach list has %d items for Compute ID %s", len(detach_list), d.Id())
for _, nDisk := range new_disks {
match = false
for _, oDisk := range old_disks {
if nDisk.(int) == oDisk.(int) {
match = true
break
}
}
if !match {
attach_list = append(attach_list, nDisk.(int))
}
}
log.Debugf("utilityComputeExtraDisksConfigure: attach list has %d items for Compute ID %s", len(attach_list), d.Id())
for _, diskId := range detach_list {
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, diskId := range detach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err := ctrl.decortAPICall("POST", ComputeDiskDetachAPI, urlValues)
if err != nil {
// failed to detach disk - there will be partial resource update
log.Debugf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId, d.Id(), err)
log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
for _, diskId := range attach_list {
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, diskId := range attach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach disk - there will be partial resource update
log.Debugf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId, d.Id(), err)
log.Errorf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
@@ -152,39 +119,26 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
return nil
}
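The delta logic above relies on *schema.Set arithmetic in place of the hand-rolled matching loops it replaces. A minimal standalone illustration of the idea, using the SDK's stock schema.HashInt hasher (the demo wiring is an assumption):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
	// Old and new "extra_disks" specs as sets of disk IDs.
	oldSet := schema.NewSet(schema.HashInt, []interface{}{101, 102, 103})
	newSet := schema.NewSet(schema.HashInt, []interface{}{102, 103, 104})

	// Disks present only in the old spec must be detached,
	// disks present only in the new spec must be attached.
	detach := oldSet.Difference(newSet)
	attach := newSet.Difference(oldSet)

	fmt.Println(detach.List()) // [101]
	fmt.Println(attach.List()) // [104]
}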
// TODO: implement do_delta logic
func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceData, do_delta bool) error {
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
// If do_delta is true, this function will identify changes between new and existing specs for network and try to
// update compute configuration accordingly
// Otherwise it will apply whatever is found in the new set of "network" right away.
// Primary use of do_delta=false is when calling this function from compute Create handler.
/*
argVal, argSet := d.GetOk("network")
if !argSet || len(argVal.([]interface{})) < 1 {
return nil
}
net_list := argVal.([]interface{}) // network is an array of maps; for keys see func networkSubresourceSchemaMake() definition
*/
old_set, new_set := d.GetChange("network")
oldNets := make([]interface{},0,0)
if old_set != nil {
oldNets = old_set.([]interface{}) // network is an array of maps; for keys see func networkSubresourceSchemaMake() definition
}
newNets := make([]interface{},0,0)
if new_set != nil {
newNets = new_set.([]interface{}) // network is an array of maps; for keys see func networkSubresourceSchemaMake() definition
}
apiErrCount := 0
var lastSavedError error
if !do_delta {
for _, net := range newNets {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, runner := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
net_data := net.(map[string]interface{})
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netType", net_data["net_type"].(string))
urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
@@ -192,11 +146,48 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
if ipSet {
urlValues.Add("ipAddr", ipaddr.(string))
}
log.Debugf("utilityComputeNetworksConfigure: ready to add network type %s ID %d for Compute ID %s",
net_data["net_type"].(string), net_data["net_id"].(int), d.Id())
_, err := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach network - partial resource update
apiErrCount++
lastSavedError = err
continue
}
if pfw_rules, ok := net_data["pfw_rule"]; ok {
// fool-proof - port forwarding is applicable to VINS type networks only! And only to
// those ViNSes that have an active GW VNF, but here we check for VINS type only; the rest
// will be validated by the cloud platform
if net_data["net_type"].(string) != "VINS" {
log.Errorf("utilityComputeNetworksConfigure: encountered port forward rules specs in network block of type %s for Compute ID %s",
net_data["net_type"].(string), d.Id())
apiErrCount++
lastSavedError = fmt.Errorf("port forward rules are only applicable to networks of type VINS")
continue
}
log.Debugf("utilityComputeNetworksConfigure: found port forward rules specs in network block ID %d for Compute ID %s",
net_data["net_id"].(int), d.Id())
for _, rule_runner := range pfw_rules.(*schema.Set).List() {
pfwValues := &url.Values{}
rule := rule_runner.(map[string]interface{})
pfwValues.Add("computeId", d.Id())
pfwValues.Add("publicPortStart", fmt.Sprintf("%d", rule["pub_port_start"].(int)))
pfwValues.Add("publicPortEnd", fmt.Sprintf("%d", rule["pub_port_end"].(int)))
pfwValues.Add("localBasePort", fmt.Sprintf("%d", rule["local_port"].(int)))
pfwValues.Add("proto", rule["proto"].(string))
log.Debugf("utilityComputeNetworksConfigure: ready to add pfw rule %d:%d -> %d proto %s for Compute ID %s",
rule["pub_port_start"].(int), rule["pub_port_end"].(int),
rule["proto"].(string), d.Id())
_, err := ctrl.decortAPICall("POST", ComputePfwAddAPI, pfwValues)
if err != nil {
// failed to add port forward rule - partial resource update
apiErrCount++
lastSavedError = err
}
}
}
}
@@ -208,84 +199,40 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
return nil
}
var attachList, detachList []ComputeNetMgmtRecord
match := false
for _, oRunner := range oldNets {
match = false
oSpecs := oRunner.(map[string]interface{})
for _, nRunner := range newNets {
nSpecs := nRunner.(map[string]interface{})
if oSpecs["net_id"].(int) == nSpecs["net_id"].(int) && oSpecs["net_type"].(string) == nSpecs["net_type"].(string) {
match = true
break
}
}
if !match {
newItem := ComputeNetMgmtRecord{
ID: oSpecs["net_id"].(int),
Type: oSpecs["net_type"].(string),
IPAddress: oSpecs["ip_address"].(string),
MAC: oSpecs["mac"].(string),
}
detachList = append(detachList, newItem)
}
}
log.Debugf("utilityComputeNetworksConfigure: detach list has %d items for Compute ID %s", len(detachList), d.Id())
for _, nRunner := range newNets {
match = false
nSpecs := nRunner.(map[string]interface{})
for _, oRunner := range oldNets {
oSpecs := oRunner.(map[string]interface{})
if nSpecs["net_id"].(int) == oSpecs["net_id"].(int) && nSpecs["net_type"].(string) == oSpecs["net_type"].(string) {
match = true
break
}
}
if !match {
newItem := ComputeNetMgmtRecord{
ID: nSpecs["net_id"].(int),
Type: nSpecs["net_type"].(string),
}
if nSpecs["ip_address"] != nil {
newItem.IPAddress = nSpecs["ip_address"].(string)
} else {
newItem.IPAddress = "" // make sure it is empty, if not coming from the schema
}
attachList = append(attachList, newItem)
}
}
log.Debugf("utilityComputeNetworksConfigure: attach list has %d items for Compute ID %s", len(attachList), d.Id())
for _, netRec := range detachList {
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, runner := range detach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("ipAddr", netRec.IPAddress)
urlValues.Add("mac", netRec.MAC)
urlValues.Add("ipAddr", net_data["ip_address"].(string))
urlValues.Add("mac", net_data["mac"].(string))
_, err := ctrl.decortAPICall("POST", ComputeNetDetachAPI, urlValues)
if err != nil {
// failed to detach this network - there will be partial resource update
log.Debugf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
netRec.ID, netRec.Type, d.Id(), err)
log.Errorf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
for _, netRec := range attachList {
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, runner := range attach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netId", fmt.Sprintf("%d",netRec.ID))
urlValues.Add("netType", netRec.Type)
if netRec.IPAddress != "" {
urlValues.Add("ipAddr", netRec.IPAddress)
urlValues.Add("netId", fmt.Sprintf("%d",net_data["net_id"].(int)))
urlValues.Add("netType", net_data["net_type"].(string))
if net_data["ip_address"].(string) != "" {
urlValues.Add("ipAddr", net_data["ip_address"].(string))
}
_, err := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach this network - there will be partial resource update
log.Debugf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s from Compute ID %s: %s",
netRec.ID, netRec.Type, d.Id(), err)
log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
@@ -300,7 +247,11 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
return nil
}
func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
//func (ctrl *ControllerCfg) utilityComputePfwConfigure(d *schema.ResourceData, do_delta bool) error {
//}
func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (int, string, error) {
// This function tries to locate Compute by one of the following approaches:
// - if compute_id is specified - locate by compute ID
// - if compute_name is specified - locate by a combination of compute name and resource
@@ -337,27 +288,27 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
urlValues.Add("computeId", fmt.Sprintf("%d", theId))
computeFacts, err := controller.decortAPICall("POST", ComputeGetAPI, urlValues)
if err != nil {
return "", err
return 0, "", err
}
return computeFacts, nil
return theId, computeFacts, nil
}
// ID was not set in the schema upon entering this function - work through Compute name
// and RG ID
computeName, argSet := d.GetOk("name")
if !argSet {
return "", fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified")
return 0, "", fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified")
}
rgId, argSet := d.GetOk("rg_id")
if !argSet {
return "", fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string))
return 0, "", fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string))
}
urlValues.Add("rgId", fmt.Sprintf("%d", rgId))
apiResp, err := controller.decortAPICall("POST", RgListComputesAPI, urlValues)
if err != nil {
return "", err
return 0, "", err
}
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp)
@@ -365,7 +316,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
computeList := RgListComputesResp{}
err = json.Unmarshal([]byte(apiResp), &computeList)
if err != nil {
return "", err
return 0, "", err
}
// log.Printf("%#v", computeList)
@@ -379,11 +330,83 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))
apiResp, err = controller.decortAPICall("POST", ComputeGetAPI, cgetValues)
if err != nil {
return "", err
return 0, "", err
}
return apiResp, nil
// NOTE: compute ID is an unsigned int in the platform. Here we convert it to int, which
// may overflow if compute IDs ever grow beyond the int range
return int(item.ID), apiResp, nil
}
}
return "", nil // there should be no error if Compute does not exist
return 0, "", nil // there should be no error if Compute does not exist
}
// This function reads port forwards from the specified compute and returns them (if any) in
// the form of a list of maps suitable for d.Set("pfw_rule") on the network block
// corresponding to the ViNS these rules belong to. To simplify identification of this
// network block among multiple blocks of the same compute, this function also
// returns the ID of the ViNS associated with the listed rules.
func utilityComputePfwGet(compId int, m interface{}) (int, []map[string]interface{}, error) {
// If there is an error either reading port forward rules from the cloud or parsing them,
// an error is returned.
// In case there are no port forwarding rules for this compute, err = nil and the rule record
// list is empty. Otherwise, both the prefix record and the rule record list contain meaningful data.
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
pfwPrefix := PfwPrefixRecord{}
pfwRules := []PfwRuleRecord{}
pfwRulesList := []map[string]interface{}{}
urlValues.Add("computeId", fmt.Sprintf("%d", compId))
apiResp, err := controller.decortAPICall("POST", ComputePfwListAPI, urlValues)
if err != nil {
return 0, pfwRulesList, err
}
if apiResp == "" {
// No port forward rules defined for this compute
return 0, pfwRulesList, nil
}
log.Debugf("utilityComputePfwGet: ready to split API response string %s", apiResp)
twoParts := strings.SplitN(apiResp, "},", 2)
if len(twoParts) != 2 {
log.Errorf("utilityComputePfwGet: non-empty pfwList response for compute ID %d failed to split into 2 fragments (got %d)", compId, len(twoParts))
return 0, pfwRulesList, fmt.Errorf("Non-empty pfwList response failed to split into 2 fragments")
}
prefixResp := strings.TrimSuffix(strings.TrimPrefix(twoParts[0], "["), ",") + "}"
log.Debugf("utilityComputePfwGet: ready to unmarshal prefix part %s", prefixResp)
err = json.Unmarshal([]byte(prefixResp), &pfwPrefix)
if err != nil {
log.Errorf("utilityComputePfwGet: failed to unmarshal prefix part of API response: %s", err)
return 0, pfwRulesList, err
}
rulesResp := "[" + twoParts[1]
log.Debugf("utilityComputePfwGet: ready to unmarshal rules part %s", rulesResp)
err = json.Unmarshal([]byte(rulesResp), &pfwRules)
if err != nil {
log.Errorf("utilityComputePfwGet: failed to unmarshal rules part of API response: %s", err)
return 0, pfwRulesList, err
}
log.Debugf("utilityComputePfwGet: successfully read %d port forward rules for Compute ID %d, ViNS ID %d",
len(pfwRules), compId, pfwPrefix.VinsID)
for _, runner := range pfwRules {
rule := map[string]interface{}{
"pub_port_start": runner.PublicPortStart,
"pub_port_end": runner.PublicPortEnd,
"local_port": runner.LocalPort,
"proto": runner.Protocol,
}
pfwRulesList = append(pfwRulesList, rule)
}
return pfwPrefix.VinsID, pfwRulesList, nil
}
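For reference, a self-contained sketch of the split-then-unmarshal approach used in utilityComputePfwGet, run against a hypothetical pfwList payload; the JSON tags for the public port and protocol fields are assumptions, since the full PfwRuleRecord is not visible in this diff:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type PfwPrefixRecord struct {
	VinsID   int    `json:"vinsId"`
	VinsName string `json:"vinsName"`
}

type PfwRuleRecord struct {
	ID              int    `json:"id"`
	LocalIP         string `json:"localIp"`
	LocalPort       int    `json:"localPort"`
	PublicPortStart int    `json:"publicPortStart"` // assumed tag
	PublicPortEnd   int    `json:"publicPortEnd"`   // assumed tag
	Protocol        string `json:"protocol"`        // assumed tag
}

func main() {
	// Hypothetical pfwList response: a prefix record followed by rule records.
	resp := `[{"vinsId":5,"vinsName":"demo-vins"},` +
		`{"id":1,"localIp":"192.168.1.10","localPort":80,"publicPortStart":8080,"publicPortEnd":8080,"protocol":"tcp"}]`

	// Split once at the end of the prefix object, then repair both fragments
	// into valid JSON - the same trick utilityComputePfwGet uses.
	twoParts := strings.SplitN(resp, "},", 2)
	prefixJSON := strings.TrimPrefix(twoParts[0], "[") + "}"
	rulesJSON := "[" + twoParts[1]

	var prefix PfwPrefixRecord
	var rules []PfwRuleRecord
	if err := json.Unmarshal([]byte(prefixJSON), &prefix); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(rulesJSON), &rules); err != nil {
		panic(err)
	}

	fmt.Printf("ViNS %d (%s): %d rule(s), first rule %s public %d -> local %d\n",
		prefix.VinsID, prefix.VinsName, len(rules),
		rules[0].Protocol, rules[0].PublicPortStart, rules[0].LocalPort)
}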