Compare commits


14 Commits

Author SHA1 Message Date
Sergey Shubin svs1370  a5f6c60a71  Improve stability of PFW check presence method  2021-10-11 19:29:17 +03:00
Sergey Shubin svs1370  105273af48  Implement more robust create and delete methods for PFW  2021-10-08 12:17:53 +03:00
Sergey Shubin svs1370  e501417166  PFW data source and resource initial implementation  2021-10-07 23:12:10 +03:00
Sergey Shubin svs1370  8058b1c08f  Deprecate ssh_keys argument to decort_kvmvm, as it is replaced by cloud_init  2021-10-06 11:19:01 +03:00
Sergey Shubin svs1370  fb83398df9  Update README  2021-10-06 10:53:41 +03:00
Sergey Shubin svs1370  a63a35ca93  Switch to native Set methods for network definition blok, hardcode disk type D  2021-10-05 17:47:08 +03:00
Sergey Shubin svs1370  cf669a7a72  Make sure extra disks (if any) are detached from compute being deleted  2021-10-05 15:01:09 +03:00
Sergey Shubin svs1370  8da3f8d348  Moving from TypeList to TypeSet for network and extra_disks attributes of compute  2021-10-04 19:55:03 +03:00
Sergey Shubin svs1370  d0fdf38473  Add validation function for pool argument of disk resource  2021-10-02 10:15:32 +03:00
Sergey Shubin svs1370  92c52794d4  Update README file  2021-09-21 14:18:46 +03:00
Sergey Shubin svs1370  7480160dc5  Implement ViNS resource update method  2021-09-17 11:40:47 +03:00
Sergey Shubin svs1370  e86224f95f  Implementing ViNS resource and polishing logic in other methods  2021-09-16 17:06:18 +03:00
Sergey Shubin svs1370  1ea7a27b21  Add image ID validation to kvmvm resource mgmt  2021-09-16 09:44:15 +03:00
Sergey Shubin svs1370  ec4e63c2c8  Add cloud_init parameter to compute resource management  2021-09-10 18:14:31 +03:00
24 changed files with 1381 additions and 278 deletions

View File

@@ -1,12 +1,13 @@
# terraform-provider-decort
Terraform provider for Digital Energy Cloud Orchestration Technology (DECORT) platform
NOTE: this provider is designed for DECORT API 3.6.x. For older API versions please use
Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs).
NOTE: provider rc-1.40 is designed for DECORT API 3.7.x. For older API versions please use:
- DECORT API 3.6.x versions - provider version rc-1.10
- DECORT API versions prior to 3.6.0 - Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs)
With this provider you can manage Compute instances, disks and resource groups in DECORT platform,
as well as query the platform for information about existing resources. This provider supports
Import operations on pre-existing resources.
With this provider you can manage Compute instances, disks, virtual network segments and resource
groups in DECORT platform, as well as query the platform for information about existing resources.
This provider supports Import operations on pre-existing resources.
See user guide at https://github.com/rudecs/terraform-provider-decort/wiki

View File

@@ -132,7 +132,7 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
}
ret_config.auth_mode_code = MODE_LEGACY
default:
return nil, fmt.Errorf("Unknown authenticator mode %q provided.", ret_config.auth_mode_txt)
return nil, fmt.Errorf("Unknown authenticator mode %s provided.", ret_config.auth_mode_txt)
}
if allow_unverified_ssl {
@@ -205,7 +205,7 @@ func (config *ControllerCfg) getOAuth2JWT() (string, error) {
return "", fmt.Errorf("getOAuth2JWT method called for undefined authorization mode.")
}
if config.auth_mode_code != MODE_OAUTH2 {
return "", fmt.Errorf("getOAuth2JWT method called for incompatible authorization mode %q.", config.auth_mode_txt)
return "", fmt.Errorf("getOAuth2JWT method called for incompatible authorization mode %s.", config.auth_mode_txt)
}
params := url.Values{}
@@ -231,7 +231,7 @@ func (config *ControllerCfg) getOAuth2JWT() (string, error) {
// fmt.Println("response Status:", resp.Status)
// fmt.Println("response Headers:", resp.Header)
// fmt.Println("response Headers:", req.URL)
return "", fmt.Errorf("getOauth2JWT: unexpected status code %d when obtaining JWT from %q for APP_ID %q, request Body %q",
return "", fmt.Errorf("getOauth2JWT: unexpected status code %d when obtaining JWT from %s for APP_ID %s, request Body %s",
resp.StatusCode, req.URL, config.app_id, params_str)
}
defer resp.Body.Close()
@@ -279,7 +279,7 @@ func (config *ControllerCfg) validateJWT(jwt string) (bool, error) {
return false, err
}
if resp.StatusCode != http.StatusOK {
return false, fmt.Errorf("validateJWT: unexpected status code %d when validating JWT against %q.",
return false, fmt.Errorf("validateJWT: unexpected status code %d when validating JWT against %s.",
resp.StatusCode, req.URL)
}
defer resp.Body.Close()
@@ -298,7 +298,7 @@ func (config *ControllerCfg) validateLegacyUser() (bool, error) {
return false, fmt.Errorf("validateLegacyUser method called for undefined authorization mode.")
}
if config.auth_mode_code != MODE_LEGACY {
return false, fmt.Errorf("validateLegacyUser method called for incompatible authorization mode %q.", config.auth_mode_txt)
return false, fmt.Errorf("validateLegacyUser method called for incompatible authorization mode %s.", config.auth_mode_txt)
}
params := url.Values{}
@@ -319,7 +319,7 @@ func (config *ControllerCfg) validateLegacyUser() (bool, error) {
return false, err
}
if resp.StatusCode != http.StatusOK {
return false, fmt.Errorf("validateLegacyUser: unexpected status code %d when validating legacy user %q against %q.",
return false, fmt.Errorf("validateLegacyUser: unexpected status code %d when validating legacy user %s against %s.",
resp.StatusCode, config.legacy_user, config.controller_url)
}
defer resp.Body.Close()
@@ -335,13 +335,15 @@ func (config *ControllerCfg) validateLegacyUser() (bool, error) {
return true, nil
}
func (config *ControllerCfg) decortAPICall(method string, api_name string, url_values *url.Values) (json_resp string, err error) {
func (config *ControllerCfg) decortAPICall(method string, api_name string, url_values *url.Values) (json_resp string, err error, hrc int) {
// This is a convenience wrapper around standard HTTP request methods that is aware of the
// authorization mode for which the provider was initialized and compiles request accordingly.
hrc = 0 // HTTP Response Code
if config.cc_client == nil {
// this should never happen if ClientConfig was properly called prior to decortAPICall
return "", fmt.Errorf("decortAPICall method called with unconfigured DECORT cloud controller HTTP client.")
return "", fmt.Errorf("decortAPICall method called with unconfigured DECORT cloud controller HTTP client."), 0
}
// Example: to create api_params, one would generally do the following:
@@ -359,7 +361,7 @@ func (config *ControllerCfg) decortAPICall(method string, api_name string, url_v
//
if config.auth_mode_code == MODE_UNDEF {
return "", fmt.Errorf("decortAPICall method called for unknown authorization mode.")
return "", fmt.Errorf("decortAPICall method called for unknown authorization mode."), 0
}
if config.auth_mode_code == MODE_LEGACY {
@@ -369,7 +371,7 @@ func (config *ControllerCfg) decortAPICall(method string, api_name string, url_v
req, err := http.NewRequest(method, config.controller_url + api_name, strings.NewReader(params_str))
if err != nil {
return "", err
return "", err, 0
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("Content-Length", strconv.Itoa(len(params_str)))
@@ -380,29 +382,29 @@ func (config *ControllerCfg) decortAPICall(method string, api_name string, url_v
resp, err := config.cc_client.Do(req)
if err != nil {
return "", err
return "", err, 0
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
tmp_body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
return "", err, resp.StatusCode
}
json_resp := Jo2JSON(string(tmp_body))
log.Debugf("decortAPICall: %s %s\n %s", method, api_name, json_resp)
return json_resp, nil
return json_resp, nil, resp.StatusCode
} else {
return "", fmt.Errorf("decortAPICall: unexpected status code %d when calling API %q with request Body %q",
resp.StatusCode, req.URL, params_str)
return "", fmt.Errorf("decortAPICall: unexpected status code %d when calling API %s with request Body %s",
resp.StatusCode, req.URL, params_str), resp.StatusCode
}
/*
if resp.StatusCode == StatusServiceUnavailable {
return nil, fmt.Errorf("decortAPICall method called for incompatible authorization mode %q.", config.auth_mode_txt)
return nil, fmt.Errorf("decortAPICall method called for incompatible authorization mode %s.", config.auth_mode_txt), resp.StatusCode
}
*/
return "", err
return "", err, resp.StatusCode
}
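
The signature change above makes decortAPICall return the HTTP response code (hrc) alongside the response body and error. The call sites updated in this changeset discard it with "_", so the sketch below is only a hypothetical illustration of how a presence-check helper in the same package could use it; the compute/get endpoint path follows the naming pattern of the other cloudapi constants in this changeset, and net/http plus net/url are assumed to be imported. It also assumes the cloud controller replies with 404 for a missing object.

// Hypothetical sketch (not part of this changeset): use the HTTP response code
// now returned by decortAPICall to tell "object not found" apart from other failures.
func (config *ControllerCfg) computeFactsIfExists(urlValues *url.Values) (string, error) {
	apiResp, err, hrc := config.decortAPICall("POST", "/restmachine/cloudapi/compute/get", urlValues)
	if err != nil {
		if hrc == http.StatusNotFound {
			return "", nil // no such compute: return empty facts but no error for an existence check
		}
		return "", err
	}
	return apiResp, nil
}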

View File

@@ -65,6 +65,8 @@ func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
return result
}
// NOTE: this is a legacy function, which is not used as of rc-1.10
// Use "parseComputeDisksToExtraDisks" instead
func parseComputeDisks(disks []DiskRecord) []interface{} {
// Return value was designed to d.Set("disks",) item of dataSourceCompute schema
// However, this item was excluded from the schema as it is not directly
@@ -80,21 +82,20 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
}
*/
result := make([]interface{}, length)
result := []interface{}{}
if length == 0 {
return result
}
elem := make(map[string]interface{})
for i, value := range disks {
for _, value := range disks {
/*
if value.Type == "B" {
// skip boot disk when parsing the list of disks
continue
}
*/
elem := make(map[string]interface{})
// keys in this map should correspond to the Schema definition
// as returned by dataSourceDiskSchemaMake()
elem["name"] = value.Name
@@ -111,7 +112,8 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
// elem["status"] = value.Status
// elem["tech_status"] = value.TechStatus
elem["compute_id"] = value.ComputeID
result[i] = elem
result = append(result, elem)
}
return result
@@ -149,14 +151,14 @@ func parseBootDiskId(disks []DiskRecord) uint {
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]interface{} {
// return value will be used to d.Set("network",) item of dataSourceCompute schema
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
// return value will be used to d.Set("network") item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := make([]map[string]interface{}, length, length)
result := []interface{}{}
for i, value := range ifaces {
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by networkSubresourceSchemaMake()
@@ -167,12 +169,13 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]int
// log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
result[i] = elem
result = append(result, elem)
}
return result
}
// NOTE: this function is retained for historical purposes and actually not used as of rc-1.10
func parseComputeInterfaces(ifaces []InterfaceRecord) []map[string]interface{} {
// return value was designed to d.Set("interfaces",) item of dataSourceCompute schema
// However, this item was excluded from the schema as it is not directly
@@ -212,7 +215,7 @@ func parseComputeInterfaces(ifaces []InterfaceRecord) []map[string]interface{} {
}
func flattenCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that comp_facts string contains response from API compute/get,
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
@@ -241,6 +244,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
d.Set("boot_disk_id", parseBootDiskId(model.Disks)) // we may need boot disk ID in resize operations
d.Set("image_id", model.ImageID)
d.Set("description", model.Desc)
d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress fucntion
// d.Set("status", model.Status)
// d.Set("tech_status", model.TechStatus)
@@ -298,6 +302,8 @@ func dataSourceCompute() *schema.Resource {
Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
},
// TODO: consider removing compute_id from the schema, as it not practical to call this data provider if
// corresponding compute ID is already known
"compute_id": {
Type: schema.TypeInt,
Optional: true,
@@ -371,7 +377,7 @@ func dataSourceCompute() *schema.Resource {
},
"extra_disks": {
Type: schema.TypeList,
Type: schema.TypeSet,
Computed: true,
MaxItems: MaxExtraDisksPerCompute,
Elem: &schema.Schema {
@@ -392,7 +398,7 @@ func dataSourceCompute() *schema.Resource {
*/
"network": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
@@ -427,6 +433,12 @@ func dataSourceCompute() *schema.Resource {
Description: "User-defined text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Computed: true,
Description: "Placeholder for cloud_init parameters.",
},
/*
"status": {
Type: schema.TypeString,

View File

@@ -67,18 +67,6 @@ func flattenDisk(d *schema.ResourceData, disk_facts string) error {
// d.Set("compute_id", model.ComputeID)
d.Set("description", model.Desc)
// d.Set("status", model.Status)
// d.Set("tech_status", model.TechStatus)
/* we do not manage snapshots via Terraform yet (and probably, never will), so
// keep this block commented out for a while
if len(model.Snapshots) > 0 {
log.Debugf("flattenDisk: calling flattenDiskSnapshots")
if err = d.Set("nics", flattenDiskSnapshots(model.Snapshots)); err != nil {
return err
}
}
*/
return nil
}
@@ -137,7 +125,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
Description: "Type of this disk. E.g. D for data disks, B for boot.",
},
"description": {

View File

@@ -45,7 +45,7 @@ func dataSourceImageRead(d *schema.ResourceData, m interface{}) error {
if accSet {
url_values.Add("accountId", fmt.Sprintf("%d", accId.(int)))
}
body_string, err := controller.decortAPICall("POST", ImagesListAPI, url_values)
body_string, err, _ := controller.decortAPICall("POST", ImagesListAPI, url_values)
if err != nil {
return err
}

decort/data_source_pfw.go (new file, 144 lines)
View File

@@ -0,0 +1,144 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package decort
import (
"encoding/json"
"fmt"
// "hash/fnv"
log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func flattenPfw(d *schema.ResourceData, pfwFacts string) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourcePfwExists(...) method
pfwRecord := ComputePfwListResp{}
err := json.Unmarshal([]byte(pfwFacts), &pfwRecord)
if err != nil {
return err
}
log.Debugf("flattenPfw: decoded %d PFW rules for compute ID %s on ViNS ID %d",
len(pfwRecord.Rules), pfwRecord.Header.VinsID, pfwRecord.Header.VinsID)
/*
Here it gets a little bit interesting.
Unlike a compute or a disk, port forwarding rules are NOT represented by any cloud
platform resource with a unique ID of its own. They are just a subset of the
rules in the list maintained by the corresponding ViNS instance. However,
Terraform needs a unique ID for each resource it manages so that it can be
stored in the state file and retrieved later.
Therefore we need to make up an ID and supply it to Terraform in the standard
way (i.e. by calling d.SetId(...)).
Fortunately, the combination of the Compute ID and the ID of the ViNS (with GW VNF)
this compute is plugged into makes a unique string, so we use it as the ID of
the PFW rule set.
The following few lines are legacy from the first attempt to make the ID
a hash of the concatenated Compute ID & ViNS ID; it did not work as expected
for a number of reasons, the explanation of which is beyond the scope of
this comment.
combo := fmt.Sprintf("%d:%d", compId.(int), pfwRecord.ViNS.VinsID)
hasher := fnv.New32a()
hasher.Write([]byte(combo))
d.SetId(fmt.Sprintf("%d", hasher.Sum32()))
*/
// set ID of this PFW rule set as "compute_id:vins_id"
d.SetId(fmt.Sprintf("%d:%d", pfwRecord.Header.ComputeID, pfwRecord.Header.VinsID))
log.Debugf("flattenPfw: PFW rule set ID %s", d.Id())
d.Set("compute_id", pfwRecord.Header.ComputeID)
d.Set("vins_id", pfwRecord.Header.VinsID)
pfwRulesList := []interface{}{}
for _, runner := range pfwRecord.Rules {
rule := map[string]interface{}{
"pub_port_start": runner.PublicPortStart,
"pub_port_end": runner.PublicPortEnd,
"local_port": runner.LocalPort,
"proto": runner.Protocol,
"rule_id": runner.ID,
}
pfwRulesList = append(pfwRulesList, rule)
}
if err = d.Set("rule", pfwRulesList); err != nil {
return err
}
return nil
}
func dataSourcePfwRead(d *schema.ResourceData, m interface{}) error {
pfwFacts, err := utilityPfwCheckPresence(d, m)
if pfwFacts == "" {
// if empty string is returned from utilityPfwCheckPresence then we got no
// PFW rules. It could also be because there was some error, which
// is indicated by non-nil err value
d.SetId("") // ensure ID is empty in this case anyway
return err
}
return flattenPfw(d, pfwFacts)
}
func dataSourcePfw() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
Read: dataSourcePfwRead,
Timeouts: &schema.ResourceTimeout{
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the compute instance to configure port forwarding rules for.",
},
"vins_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the ViNS to configure port forwarding rules on. Compute must be already plugged into this ViNS and ViNS must have external network connection.",
},
// TODO: consider making "rule" attribute Required with MinItems = 1
"rule": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: rulesSubresourceSchemaMake(),
},
Description: "Port forwarding rule. You may specify several rules, one in each such block.",
},
},
}
}
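
Since the PFW rule set ID is synthesized as the string "compute_id:vins_id" (see flattenPfw above), any code path that only has d.Id() at hand, e.g. an import, would need to split it back into the two integers. A hypothetical helper along these lines is sketched below; it is not part of this changeset, and the strings, strconv and fmt packages are assumed to be imported.

// parsePfwId recovers the compute and ViNS IDs from a "compute_id:vins_id" string.
func parsePfwId(id string) (computeId, vinsId int, err error) {
	parts := strings.SplitN(id, ":", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("parsePfwId: unexpected PFW rule set ID format %q", id)
	}
	if computeId, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, err
	}
	if vinsId, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	return computeId, vinsId, nil
}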

View File

@@ -51,12 +51,14 @@ func flattenVins(d *schema.ResourceData, vins_facts string) error {
vinsRecord.Name, vinsRecord.ID, vinsRecord.AccountID, vinsRecord.RgID)
d.SetId(fmt.Sprintf("%d", vinsRecord.ID))
d.Set("name", vinsRecord.Name)
d.Set("account_id", vinsRecord.AccountID)
d.Set("account_name", vinsRecord.AccountName)
err = d.Set("rg_id", vinsRecord.RgID)
d.Set("description", vinsRecord.Desc)
d.Set("ipcidr", vinsRecord.IPCidr)
noExtNetConnection := true
for _, value := range vinsRecord.VNFs {
if value.Type == "GW" {
log.Debugf("flattenVins: discovered GW VNF ID %d in ViNS ID %d", value.ID, vinsRecord.ID)
@@ -69,10 +71,16 @@ func flattenVins(d *schema.ResourceData, vins_facts string) error {
} else {
return fmt.Errorf("Failed to unmarshal VNF GW Config - structure is invalid.")
}
noExtNetConnection = false
break
}
}
if noExtNetConnection {
d.Set("ext_ip_addr", "")
d.Set("ext_net_id", 0)
}
log.Debugf("flattenVins: EXTRA CHECK - schema rg_id=%d, ext_net_id=%d", d.Get("rg_id").(int), d.Get("ext_net_id").(int))
return nil
@@ -104,7 +112,7 @@ func dataSourceVins() *schema.Resource {
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Required: true,
Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.",
},

View File

@@ -44,6 +44,7 @@ func diskSubresourceSchemaMake() map[string]*schema.Schema {
"account_id": {
Type: schema.TypeInt,
Computed: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the account this disk belongs to.",
},

View File

@@ -201,6 +201,9 @@ type KvmVmCreateParam struct { // this is unified structure for both x86 and PPC
Start bool `json:"start"`
}
// structures related to cloudapi/compute/start API
const ComputeStartAPI = "/restmachine/cloudapi/compute/start"
// structures related to cloudapi/compute/delete API
const ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
@@ -443,7 +446,20 @@ type AccountsListResp []AccountRecord
//
// structures related to /cloudapi/portforwarding/list API
//
type PfwRecord struct {
// Note the specifics of the compute/pfwList response in API 3.7.x (this may change in the future):
// 1) if there are no PFW rules and the compute is not connected to any PFW-able ViNS,
// the response will be an empty string
// 2) if there are no PFW rules but the compute is connected to a PFW-able ViNS,
// the response will contain a list with a single element - the prefix (see PfwPrefixRecord)
// 3) if there are port forwarding rules, the response will contain a list which starts
// with the prefix (see PfwPrefixRecord), followed by one or more rule records
// (see PfwRuleRecord)
type PfwPrefixRecord struct {
VinsID int `json:"vinsId"`
VinsName string `json:"vinsName"`
ComputeID int `json:"computeId"`
}
type PfwRuleRecord struct {
ID int `json:"id"`
LocalIP string `json:"localIp"`
LocalPort int `json:"localPort"`
@@ -453,9 +469,12 @@ type PfwRecord struct {
ComputeID int `json:"vmId"`
}
const ComputePfwListAPI = "/restmachine/cloudapi/compute/pfwList"
type ComputePfwListResp struct {
Header PfwPrefixRecord `json:"header"`
Rules []PfwRuleRecord `json:"rules"`
}
type ComputePfwListResp []PfwRecord
const ComputePfwListAPI = "/restmachine/cloudapi/compute/pfwList"
const ComputePfwAddAPI = "/restmachine/cloudapi/compute/pfwAdd"
@@ -559,7 +578,12 @@ type VinsRecord struct { // represents part of the response from API vins/get
const VinsGetAPI = "/restmachine/cloudapi/vins/get"
const VinsCreateAPI = "/restmachine/cloudapi/vins/create"
const VinsCreateInAccountAPI = "/restmachine/cloudapi/vins/createInAccount"
const VinsCreateInRgAPI = "/restmachine/cloudapi/vins/createInRG"
const VinsExtNetConnectAPI = "/restmachine/cloudapi/vins/extNetConnect"
const VinsExtNetDisconnectAPI = "/restmachine/cloudapi/vins/extNetDisconnect"
const VinsDeleteAPI = "/restmachine/cloudapi/vins/delete"
//
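
The three response cases described in the note above imply that the raw compute/pfwList reply is a heterogeneous JSON array (prefix record first, then rule records), while flattenPfw unmarshals a single {header, rules} document. Below is a minimal sketch of how such a reply could be repackaged into ComputePfwListResp JSON; the provider's actual utilityPfwCheckPresence is not part of this diff and may work differently, and encoding/json is assumed to be imported.

// repackPfwList converts the raw pfwList reply (a JSON array as described above)
// into the {"header": ..., "rules": [...]} document expected by flattenPfw.
func repackPfwList(rawResp string) (string, error) {
	if rawResp == "" {
		return "", nil // case 1: compute is not connected to a PFW-able ViNS
	}
	items := []json.RawMessage{}
	if err := json.Unmarshal([]byte(rawResp), &items); err != nil {
		return "", err
	}
	if len(items) == 0 {
		return "", nil
	}
	resp := ComputePfwListResp{}
	if err := json.Unmarshal(items[0], &resp.Header); err != nil { // cases 2 and 3: the prefix comes first
		return "", err
	}
	for _, item := range items[1:] { // case 3: rule records follow the prefix
		rule := PfwRuleRecord{}
		if err := json.Unmarshal(item, &rule); err != nil {
			return "", err
		}
		resp.Rules = append(resp.Rules, rule)
	}
	repacked, err := json.Marshal(resp)
	return string(repacked), err
}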

View File

@@ -21,11 +21,15 @@ import (
// "encoding/json"
// "fmt"
"bytes"
"hash/fnv"
log "github.com/sirupsen/logrus"
// "net/url"
"sort"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode"
)
// This is subresource of compute resource used when creating/managing compute network connections
@@ -39,6 +43,71 @@ func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.Reso
return true // suppress difference
}
// This function is based on the original Terraform SerializeResourceForHash found
// in helper/schema/serialize.go
// It skips network subresource attributes, which are irrelevant for identification
// of unique network blocks
func networkSubresourceSerialize(output *bytes.Buffer, val interface{}, resource *schema.Resource) {
if val == nil {
return
}
rs := resource.Schema
m := val.(map[string]interface{})
var keys []string
allComputed := true
for k, val := range rs {
if val.Optional || val.Required {
allComputed = false
}
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// explicitly ignore "ip_address" when hashing
if k == "ip_address" {
continue
}
subSchema := rs[k]
// Skip attributes that are not user-provided. Computed attributes
// do not contribute to the hash since their ultimate value cannot
// be known at plan/diff time.
if !allComputed && !(subSchema.Required || subSchema.Optional) {
continue
}
output.WriteString(k)
output.WriteRune(':')
value := m[k]
schema.SerializeValueForHash(output, value, subSchema)
}
}
// HashNetworkSubresource hashes network subresource of compute resource. It uses
// specially designed networkSubresourceSerialize (see above) to make sure hashing
// does not involve attributes that we deem irrelevant to the uniqueness of network
// subresource definitions.
// It is this function that should be specified as SchemaSetFunc when creating Set
// from network subresource (e.g. in flattenCompute)
//
// This function is based on the original Terraform function HashResource from
// helper/schema/set.go
func HashNetworkSubresource(resource *schema.Resource) schema.SchemaSetFunc {
return func(v interface{}) int {
var serialized bytes.Buffer
networkSubresourceSerialize(&serialized, v, resource)
hs := fnv.New32a()
hs.Write(serialized.Bytes())
return int(hs.Sum32())
}
}
func networkSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"net_type": {
@@ -58,6 +127,7 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
"ip_address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
},

View File

@@ -102,8 +102,8 @@ func Provider() *schema.Provider {
"decort_resgroup": resourceResgroup(),
"decort_kvmvm": resourceCompute(),
"decort_disk": resourceDisk(),
// "decort_vins": resourceVins(),
// "decort_pfw": resourcePfw(),
"decort_vins": resourceVins(),
"decort_pfw": resourcePfw(),
},
DataSourcesMap: map[string]*schema.Resource{
@@ -113,7 +113,7 @@ func Provider() *schema.Provider {
"decort_image": dataSourceImage(),
"decort_disk": dataSourceDisk(),
"decort_vins": dataSourceVins(),
// "decort_pfw": dataSourcePfw(),
"decort_pfw": dataSourcePfw(),
},
ConfigureFunc: providerConfigure,

View File

@@ -25,7 +25,7 @@ Visit https://github.com/rudecs/terraform-provider-decort for full source code p
package decort
import (
// "encoding/json"
"encoding/json"
"fmt"
"net/url"
"strconv"
@@ -36,6 +36,19 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if oldVal == "" && newVal != "applied" {
// if old value for "cloud_init" resource is empty string, it means that we are creating new compute
// and there is a chance that the user will want custom cloud init parameters - so we check if
// cloud_init is explicitly set in TF file by making sure that its new value is different from "applied",
// which is a reserved keyword.
log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false // there is a difference between stored and new value
}
log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
}
func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
// we assume all mandatory parameters it takes to create a compute instance are properly
// specified - we rely on schema "Required" attributes to let Terraform validate them for us
@@ -55,19 +68,21 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("imageId", fmt.Sprintf("%d", d.Get("image_id").(int)))
urlValues.Add("bootDisk", fmt.Sprintf("%d", d.Get("boot_disk_size").(int)))
urlValues.Add("netType", "NONE") // at the 1st step create isolated compute
// urlValues.Add("start", "false") // at the 1st step create compute in a stopped state
urlValues.Add("start", "0") // at the 1st step create compute in a stopped state
argVal, argSet := d.GetOk("description")
if argSet {
urlValues.Add("desc", argVal.(string))
}
/*
sshKeysVal, sshKeysSet := d.GetOk("ssh_keys")
if sshKeysSet {
// process SSH Key settings and set API values accordingly
log.Debugf("resourceComputeCreate: calling makeSshKeysArgString to setup SSH keys for guest login(s)")
urlValues.Add("userdata", makeSshKeysArgString(sshKeysVal.([]interface{})))
}
*/
computeCreateAPI := KvmX86CreateAPI
arch := d.Get("arch").(string)
@@ -77,8 +92,17 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
} else { // note that we do not validate arch value for explicit "KVM_X86" here
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
}
argVal, argSet = d.GetOk("cloud_init")
if argSet {
// userdata must not be empty string and must not be a reserved keyword "applied"
userdata := argVal.(string)
if userdata != "" && userdata != "applied" {
urlValues.Add("userdata", userdata)
}
}
apiResp, err := controller.decortAPICall("POST", computeCreateAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", computeCreateAPI, urlValues)
if err != nil {
return err
}
@@ -92,21 +116,23 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
d.SetPartial("ram")
d.SetPartial("image_id")
d.SetPartial("boot_disk_size")
/*
if sshKeysSet {
d.SetPartial("ssh_keys")
}
*/
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string))
// Configure data disks if any
extraDisksOk := true
argVal, argSet = d.GetOk("extra_disks")
if argSet && len(argVal.([]interface{})) > 0 {
if argSet && argVal.(*schema.Set).Len() > 0 {
// urlValues.Add("desc", argVal.(string))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", len(argVal.([]interface{})))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
err = controller.utilityComputeExtraDisksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching extra disks to a new Compute ID %s: %s", compId, err)
log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %s: %s", compId, err)
extraDisksOk = false
}
}
@@ -117,8 +143,8 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
// Configure external networks if any
netsOk := true
argVal, argSet = d.GetOk("network")
if argSet && len(argVal.([]interface{})) > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", len(argVal.([]interface{})))
if argSet && argVal.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
err = controller.utilityComputeNetworksConfigure(d, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
@@ -134,6 +160,16 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
// if there were no errors in setting any of the subresources, we may leave Partial mode
d.Partial(false)
}
// Nota bene: we created the compute in a STOPPED state (this is required to properly attach the 1st network interface),
// now we need to start it before we report the sequence complete
reqValues := &url.Values{}
reqValues.Add("computeId", fmt.Sprintf("%d", compId))
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", compId)
apiResp, err, _ = controller.decortAPICall("POST", ComputeStartAPI, reqValues)
if err != nil {
return err
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
@@ -150,6 +186,7 @@ func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
compFacts, err := utilityComputeCheckPresence(d, m)
if compFacts == "" {
d.SetId("")
if err != nil {
return err
}
@@ -207,7 +244,7 @@ func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
oldCpu.(int), newCpu.(int),
oldRam.(int), newRam.(int))
_, err := controller.decortAPICall("POST", ComputeResizeAPI, params)
_, err, _ := controller.decortAPICall("POST", ComputeResizeAPI, params)
if err != nil {
return err
}
@@ -223,7 +260,7 @@ func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
bdsParams.Add("size", fmt.Sprintf("%d", newSize.(int)))
log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))
_, err := controller.decortAPICall("POST", DisksResizeAPI, params)
_, err, _ := controller.decortAPICall("POST", DisksResizeAPI, params)
if err != nil {
return err
}
@@ -257,8 +294,9 @@ func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this function destroys target Compute instance "permanently", so
// there is no way to restore it. It also destroys all extra disks
// attached to this compute, so "User, ye be warned!"
// there is no way to restore it.
// If the compute being destroyed has any extra disks attached, they are
// detached from the compute beforehand
log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
@@ -269,12 +307,40 @@ func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
return nil
}
controller := m.(*ControllerCfg)
model := ComputeGetResp{}
log.Debugf("resourceComputeDelete: ready to unmarshal string %s", compFacts)
err = json.Unmarshal([]byte(compFacts), &model)
if err == nil && len(model.Disks) > 0 {
// prepare to detach data disks from compute - do it only if compFacts unmarshalled
// properly and the resulting model contains non-empty Disks list
for _, diskFacts := range model.Disks {
if diskFacts.Type == "B" {
// boot disk is never detached on compute delete
continue
}
log.Debugf("resourceComputeDelete: ready to detach data disk ID %d from compute ID %s", diskFacts.ID, d.Id())
detachParams := &url.Values{}
detachParams.Add("computeId", d.Id())
detachParams.Add("diskId", fmt.Sprintf("%d", diskFacts.ID))
_, err, _ = controller.decortAPICall("POST", ComputeDiskDetachAPI, detachParams)
if err != nil {
// We do not fail compute deletion on data disk detach errors
log.Errorf("resourceComputeDelete: error when detaching Disk ID %d: %s", diskFacts.ID, err)
}
}
}
params := &url.Values{}
params.Add("computeId", d.Id())
params.Add("permanently", "true")
controller := m.(*ControllerCfg)
_, err = controller.decortAPICall("POST", ComputeDeleteAPI, params)
params.Add("permanently", "1")
// TODO: this is for the upcoming API update - params.Add("detachdisks", "1")
_, err, _ = controller.decortAPICall("POST", ComputeDeleteAPI, params)
if err != nil {
return err
}
@@ -284,7 +350,7 @@ func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
func resourceComputeExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Debugf("resourceComputeExist: called for Compute name %q, RG ID %d",
log.Debugf("resourceComputeExist: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
@@ -360,17 +426,18 @@ func resourceCompute() *schema.Resource {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the OS image to base this compute instance on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Optional: true,
Description: "This compute instance boot disk size in GB.",
Required: true,
Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
},
"extra_disks": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxExtraDisksPerCompute,
Elem: &schema.Schema{
@@ -380,7 +447,7 @@ func resourceCompute() *schema.Resource {
},
"network": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
@@ -389,6 +456,7 @@ func resourceCompute() *schema.Resource {
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
/*
"ssh_keys": {
Type: schema.TypeList,
Optional: true,
@@ -398,11 +466,21 @@ func resourceCompute() *schema.Resource {
},
Description: "SSH keys to authorize on this compute instance.",
},
*/
"description": {
Type: schema.TypeString,
Optional: true,
Description: "Description of this compute instance.",
Description: "Optional text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Optional: true,
Default: "applied",
DiffSuppressFunc: cloudInitDiffSupperss,
Description: "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.",
},
// The rest are Compute properties, which are "computed" once it is created
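
The cloud_init attribute above is wired to the Default value "applied" and to cloudInitDiffSupperss defined earlier in this file. The self-contained snippet below only illustrates the resulting behavior; it mirrors the decision logic of the provider's function (which additionally logs and takes schema arguments) and is not part of this changeset.

package main

import "fmt"

// cloudInitSuppressed mirrors the decision logic of cloudInitDiffSupperss.
func cloudInitSuppressed(oldVal, newVal string) bool {
	if oldVal == "" && newVal != "applied" {
		return false // new compute with custom user-data: keep the diff, send userdata on create
	}
	return true // otherwise ignore cloud_init changes
}

func main() {
	fmt.Println(cloudInitSuppressed("", "#cloud-config ..."))    // false: custom user-data applied on create
	fmt.Println(cloudInitSuppressed("", "applied"))              // true: attribute left at its default
	fmt.Println(cloudInitSuppressed("applied", "#cloud-config")) // true: changes on existing compute are ignored
}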

View File

@@ -46,7 +46,7 @@ func resourceDiskCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("gid", fmt.Sprintf("%d", DefaultGridID)) // we use default Grid ID, which was obtained along with DECORT Controller init
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("size", fmt.Sprintf("%d", d.Get("size").(int)))
urlValues.Add("type", d.Get("type").(string))
urlValues.Add("type", "D") // NOTE: only disks of Data type are managed via plugin
urlValues.Add("sep_id", fmt.Sprintf("%d", d.Get("sep_id").(int)))
urlValues.Add("pool", d.Get("pool").(string))
@@ -55,7 +55,7 @@ func resourceDiskCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("description", argVal.(string))
}
apiResp, err := controller.decortAPICall("POST", DisksCreateAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", DisksCreateAPI, urlValues)
if err != nil {
return err
}
@@ -105,7 +105,7 @@ func resourceDiskUpdate(d *schema.ResourceData, m interface{}) error {
sizeParams := &url.Values{}
sizeParams.Add("diskId", d.Id())
sizeParams.Add("size", fmt.Sprintf("%d", newSize.(int)))
_, err := controller.decortAPICall("POST", DisksResizeAPI, sizeParams)
_, err, _ := controller.decortAPICall("POST", DisksResizeAPI, sizeParams)
if err != nil {
return err
}
@@ -121,17 +121,21 @@ func resourceDiskUpdate(d *schema.ResourceData, m interface{}) error {
renameParams := &url.Values{}
renameParams.Add("diskId", d.Id())
renameParams.Add("name", newName.(string))
_, err := controller.decortAPICall("POST", DisksRenameAPI, renameParams)
_, err, _ := controller.decortAPICall("POST", DisksRenameAPI, renameParams)
if err != nil {
return err
}
d.SetPartial("name")
}
/*
NOTE: plugin will manage disks of type "Data" only, and type cannot be changed once disk is created
oldType, newType := d.GetChange("type")
if oldType.(string) != newType.(string) {
return fmt.Errorf("resourceDiskUpdate: Disk ID %s - changing type of existing disk not allowed", d.Id())
}
*/
d.Partial(false)
@@ -157,17 +161,17 @@ func resourceDiskDelete(d *schema.ResourceData, m interface{}) error {
params := &url.Values{}
params.Add("diskId", d.Id())
// NOTE: we are not force-detaching disk from a compute (if attached) this protecting
// NOTE: we are not force-detaching disk from a compute (if attached) thus protecting
// data that may be on that disk from destruction.
// However, this may change in the future, as TF state management logic may want
// to delete disk resource BEFORE it is detached from compute instance, and, while
// perfectly OK from data preservation viewpoint, this is breaking expected TF workflow
// in the eyes of an experienced TF user
params.Add("detach", "false")
params.Add("permanently", "true")
params.Add("detach", "0")
params.Add("permanently", "1")
controller := m.(*ControllerCfg)
_, err = controller.decortAPICall("POST", DisksDeleteAPI, params)
_, err, _ = controller.decortAPICall("POST", DisksDeleteAPI, params)
if err != nil {
return err
}
@@ -214,6 +218,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Storage end-point provider serving this disk. Cannot be changed for existing disk.",
},
@@ -221,15 +226,19 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringIsNotEmpty,
Description: "Pool where this disk is located. Cannot be changed for existing disk.",
},
"size": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Size of the disk in GB. Note, that existing disks can only be grown in size.",
},
/* We moved "type" attribute to computed attributes section, as plugin manages disks of only
one type - "D", e.g. data disks.
"type": {
Type: schema.TypeString,
Optional: true,
@@ -238,6 +247,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
Description: "Optional type of this disk. Defaults to D, i.e. data disk. Cannot be changed for existing disks.",
},
*/
"description": {
Type: schema.TypeString,
@@ -259,6 +269,12 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Description: "ID of the image, which this disk was cloned from (if ever cloned).",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
@@ -274,24 +290,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
},
Description: "List of user-created snapshots for this disk."
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current model status of this disk.",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Current technical status of this disk.",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the compute instance where this disk is attached to, or 0 for unattached disk.",
},
*/
}

decort/resource_pfw.go (new file, 212 lines)
View File

@@ -0,0 +1,212 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package decort
import (
// "encoding/json"
"fmt"
log "github.com/sirupsen/logrus"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func resourcePfwCreate(d *schema.ResourceData, m interface{}) error {
compId := d.Get("compute_id")
rules_set, ok := d.GetOk("rule")
if !ok || rules_set.(*schema.Set).Len() == 0 {
log.Debugf("resourcePfwCreate: empty new PFW rules set requested for compute ID %d - nothing to create", compId.(int))
return nil
}
log.Debugf("resourcePfwCreate: ready to setup %d PFW rules for compute ID %d",
rules_set.(*schema.Set).Len(), compId.(int))
controller := m.(*ControllerCfg)
apiErrCount := 0
var lastSavedError error
for _, runner := range rules_set.(*schema.Set).List() {
rule := runner.(map[string]interface{})
params := &url.Values{}
params.Add("computeId", fmt.Sprintf("%d", compId.(int)))
params.Add("publicPortStart", fmt.Sprintf("%d", rule["pub_port_start"].(int)))
params.Add("publicPortEnd", fmt.Sprintf("%d", rule["pub_port_end"].(int)))
params.Add("localBasePort", fmt.Sprintf("%d", rule["local_port"].(int)))
params.Add("proto", rule["proto"].(string))
log.Debugf("resourcePfwCreate: ready to add rule %d:%d -> %d %s for Compute ID %d",
rule["pub_port_start"].(int),rule["pub_port_end"].(int),
rule["local_port"].(int), rule["proto"].(string),
compId.(int))
_, err, _ := controller.decortAPICall("POST", ComputePfwAddAPI, params)
if err != nil {
log.Errorf("resourcePfwCreate: error adding rule %d:%d -> %d %s for Compute ID %d: %s",
rule["pub_port_start"].(int),rule["pub_port_end"].(int),
rule["local_port"].(int), rule["proto"].(string),
compId.(int),
err)
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("resourcePfwCreate: there were %d error(s) adding PFW rules to Compute ID %s. Last error was: %s",
apiErrCount, compId.(int), lastSavedError)
return lastSavedError
}
return resourcePfwRead(d, m)
}
func resourcePfwRead(d *schema.ResourceData, m interface{}) error {
pfwFacts, err := utilityPfwCheckPresence(d, m)
if pfwFacts == "" {
// if empty string is returned from utilityPfwCheckPresence then we got no
// PFW rules. It could also be because there was some error, which
// is indicated by non-nil err value
d.SetId("") // ensure ID is empty in this case anyway
return err
}
return flattenPfw(d, pfwFacts)
}
func resourcePfwUpdate(d *schema.ResourceData, m interface{}) error {
// TODO: update not implemented yet
compId := d.Get("compute_id")
return fmt.Errorf("resourcePfwUpdate: method is not implemented yet (Compute ID %d)", compId.(int))
// return resourcePfwRead(d, m)
}
func resourcePfwDelete(d *schema.ResourceData, m interface{}) error {
compId := d.Get("compute_id")
rules_set, ok := d.GetOk("rule")
if !ok || rules_set.(*schema.Set).Len() == 0 {
log.Debugf("resourcePfwDelete: no PFW rules defined for compute ID %d - nothing to delete", compId.(int))
return nil
}
log.Debugf("resourcePfwDelete: ready to delete %d PFW rules from compute ID %d",
rules_set.(*schema.Set).Len(), compId.(int))
controller := m.(*ControllerCfg)
apiErrCount := 0
var lastSavedError error
for _, runner := range rules_set.(*schema.Set).List() {
rule := runner.(map[string]interface{})
params := &url.Values{}
params.Add("computeId", fmt.Sprintf("%d", compId.(int)))
params.Add("ruleId", fmt.Sprintf("%d", rule["rule_id"].(int)))
log.Debugf("resourcePfwCreate: ready to delete rule ID%s (%d:%d -> %d %s) from Compute ID %d",
rule["rule_id"].(int),
rule["pub_port_start"].(int),rule["pub_port_end"].(int),
rule["local_port"].(int), rule["proto"].(string),
compId.(int))
_, err, _ := controller.decortAPICall("POST", ComputePfwDelAPI, params)
if err != nil {
log.Errorf("resourcePfwDelete: error deleting rule ID %d (%d:%d -> %d %s) from Compute ID %d: %s",
rule["rule_id"].(int),
rule["pub_port_start"].(int),rule["pub_port_end"].(int),
rule["local_port"].(int), rule["proto"].(string),
compId.(int),
err)
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("resourcePfwDelete: there were %d error(s) when deleting PFW rules from Compute ID %s. Last error was: %s",
apiErrCount, compId.(int), lastSavedError)
return lastSavedError
}
return nil
}
func resourcePfwExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Debugf("resourcePfwExists: called for Compute ID %d", d.Get("compute_id").(int))
pfwFacts, err := utilityPfwCheckPresence(d, m)
if pfwFacts == "" {
if err != nil {
return false, err
}
return false, nil
}
return true, nil
}
func resourcePfw() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
Create: resourcePfwCreate,
Read: resourcePfwRead,
Update: resourcePfwUpdate,
Delete: resourcePfwDelete,
Exists: resourcePfwExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Timeouts: &schema.ResourceTimeout{
Create: &Timeout180s,
Read: &Timeout30s,
Update: &Timeout180s,
Delete: &Timeout60s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the compute instance to configure port forwarding rules for.",
},
"vins_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the ViNS to configure port forwarding rules on. Compute must be already plugged into this ViNS and ViNS must have external network connection.",
},
// TODO: consider making "rule" attribute Required with MinItems = 1 to prevent
// empty PFW list definition
"rule": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: rulesSubresourceSchemaMake(),
},
Description: "Port forwarding rule. You may specify several rules, one in each such block.",
},
},
}
}
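
resourcePfwUpdate above is a stub that returns an error. One possible shape for a future implementation, sketched here as an assumption rather than the author's plan, is to diff the old and new "rule" sets and apply the difference; schema and log are already imported in this file, and Set.Difference is part of the helper/schema package.

// pfwUpdateSketch outlines a set-difference based update - illustration only.
func pfwUpdateSketch(d *schema.ResourceData, m interface{}) error {
	oldRules, newRules := d.GetChange("rule")
	rulesToDelete := oldRules.(*schema.Set).Difference(newRules.(*schema.Set))
	rulesToAdd := newRules.(*schema.Set).Difference(oldRules.(*schema.Set))
	log.Debugf("pfwUpdateSketch: %d rule(s) to delete, %d rule(s) to add",
		rulesToDelete.Len(), rulesToAdd.Len())
	// For each rule in rulesToDelete call ComputePfwDelAPI, and for each rule in
	// rulesToAdd call ComputePfwAddAPI, re-using the parameter marshalling shown
	// in resourcePfwCreate and resourcePfwDelete above, then refresh the state.
	return resourcePfwRead(d, m)
}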

View File

@@ -30,6 +30,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func resourceResgroupCreate(d *schema.ResourceData, m interface{}) error {
@@ -114,7 +115,7 @@ func resourceResgroupCreate(d *schema.ResourceData, m interface{}) error {
url_values.Add("extIp", ext_ip.(string))
}
api_resp, err := controller.decortAPICall("POST", ResgroupCreateAPI, url_values)
api_resp, err, _ := controller.decortAPICall("POST", ResgroupCreateAPI, url_values)
if err != nil {
return err
}
@@ -145,7 +146,28 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d",
d.Get("name").(string), d.Get("account_id").(int))
do_update := false
/* NOTE: we do not allow changing the following attributes of an existing RG via terraform:
- def_net_type
- ipcidr
- ext_net_id
- ext_ip
The following code fragment checks if any of these have been changed and generates an error.
*/
for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
attr_new, attr_old := d.GetChange("def_net_type")
if attr_new.(string) != attr_old.(string) {
return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr)
}
}
attr_new, attr_old := d.GetChange("ext_net_id")
if attr_new.(int) != attr_old.(int) {
return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id())
}
do_general_update := false // will be true if general RG update is necessary (API rg/update)
controller := m.(*ControllerCfg)
url_values := &url.Values{}
@@ -156,7 +178,7 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.")
name_old, _ := d.GetChange("name")
if name_old.(string) != name_new.(string) {
do_update = true
do_general_update = true
url_values.Add("name", name_new.(string))
}
}
@@ -169,31 +191,31 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
quotarecord_old, _ := makeQuotaRecord(quota_value_old.([]interface{}))
if quotarecord_new.Cpu != quotarecord_old.Cpu {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu)
url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quotarecord_new.Cpu))
}
if quotarecord_new.Disk != quotarecord_old.Disk {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk)
url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quotarecord_new.Disk))
}
if quotarecord_new.Ram != quotarecord_old.Ram { // NB: quota on RAM is stored as float32, in units of MB
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram)
url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecord_new.Ram))
}
if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic)
url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quotarecord_new.ExtTraffic))
}
if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs {
do_update = true
do_general_update = true
log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs)
url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quotarecord_new.ExtIPs))
}
@@ -204,14 +226,14 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.")
desc_old, _ := d.GetChange("description")
if desc_old.(string) != desc_new.(string) {
do_update = true
do_general_update = true
url_values.Add("desc", desc_new.(string))
}
}
if do_update {
if do_general_update {
log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG")
_, err := controller.decortAPICall("POST", ResgroupUpdateAPI, url_values)
_, err, _ := controller.decortAPICall("POST", ResgroupUpdateAPI, url_values)
if err != nil {
return err
}
@@ -237,12 +259,12 @@ func resourceResgroupDelete(d *schema.ResourceData, m interface{}) error {
url_values := &url.Values{}
url_values.Add("rgId", d.Id())
url_values.Add("force", "true")
url_values.Add("permanently", "true")
url_values.Add("force", "1")
url_values.Add("permanently", "1")
url_values.Add("reason", "Destroyed by DECORT Terraform provider")
controller := m.(*ControllerCfg)
_, err = controller.decortAPICall("POST", ResgroupDeleteAPI, url_values)
_, err, _ = controller.decortAPICall("POST", ResgroupDeleteAPI, url_values)
if err != nil {
return err
}
@@ -294,6 +316,7 @@ func resourceResgroup() *schema.Resource {
"account_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Unique ID of the account, which this resource group belongs to.",
},
@@ -301,6 +324,7 @@ func resourceResgroup() *schema.Resource {
Type: schema.TypeString,
Optional: true,
Default: "PRIVATE",
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
@@ -320,13 +344,13 @@ func resourceResgroup() *schema.Resource {
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "ID of the external network, which this resource group will use as default for its computes if def_net_type=PUBLIC",
Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "IP address on the external netowrk to request, if def_net_type=PUBLIC",
Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0",
},
/* commented out, as in this version of provider we use default Grid ID

decort/resource_vins.go (new file, 318 lines)
View File

@@ -0,0 +1,318 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
// "encoding/json"
"fmt"
"net/url"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func ipcidrDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if oldVal == "" && newVal != "" {
// if old value for "ipcidr" resource is empty string, it means that we are creating new ViNS
// and there is a chance that the user will want specific IP address range for this ViNS -
// check if "ipcidr" is explicitly set in TF file to a non-empty string.
log.Debugf("ipcidrDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false // there is a difference between stored and new value
}
log.Debugf("ipcidrDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
}
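A hypothetical test sketch (not part of the changeset; assumes a _test.go file in package decort with "testing" imported) illustrating the intended behaviour: an explicit "ipcidr" on a brand-new ViNS produces a diff, while any later change is suppressed because the attribute only matters at creation time.

// Hypothetical illustration; ipcidrDiffSupperss ignores its ResourceData argument, so nil is fine here.
func TestIpcidrDiffSupperss(t *testing.T) {
	if ipcidrDiffSupperss("ipcidr", "", "10.10.10.0/24", nil) {
		t.Error("diff must not be suppressed when a new ViNS requests an explicit ipcidr")
	}
	if !ipcidrDiffSupperss("ipcidr", "192.168.1.0/24", "10.10.10.0/24", nil) {
		t.Error("diff must be suppressed once ipcidr is already recorded in state")
	}
}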
func resourceVinsCreate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVinsCreate: called for ViNS name %s, Account ID %d, RG ID %d",
d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int))
apiToCall := VinsCreateInAccountAPI
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("name", d.Get("name").(string))
argVal, argSet := d.GetOk("rg_id")
if argSet && argVal.(int) > 0 {
apiToCall = VinsCreateInRgAPI
urlValues.Add("rgId", fmt.Sprintf("%d", argVal.(int)))
} else {
// RG ID either not set at all or set to 0 - user may want ViNS at account level
argVal, argSet = d.GetOk("account_id")
if !argSet || argVal.(int) <= 0 {
// No valid Account ID (and no RG ID either) - cannot create ViNS
return fmt.Errorf("resourceVinsCreate: ViNS name %s - no valid account and/or resource group ID specified", d.Id())
}
urlValues.Add("accountId", fmt.Sprintf("%d", argVal.(int)))
}
argVal, argSet = d.GetOk("ext_net_id") // NB: even if ext_net_id value is explicitly set to 0, argSet = false anyway
if argSet {
if argVal.(int) > 0 {
// connect to specific external network
urlValues.Add("extNetId", fmt.Sprintf("%d", argVal.(int)))
/*
Commented out, as we've made "ext_net_ip" parameter non-configurable via Terraform!
// in case of specific ext net connection user may also want a particular IP address
argVal, argSet = d.GetOk("ext_net_ip")
if argSet && argVal.(string) != "" {
urlValues.Add("extIp", argVal.(string))
}
*/
} else {
// ext_net_id is set to a negative value - connect to default external network
// no particular IP address selection in this case
urlValues.Add("extNetId", "0")
}
}
argVal, argSet = d.GetOk("ipcidr")
if argSet && argVal.(string) != "" {
log.Debugf("resourceVinsCreate: ipcidr is set to %s", argVal.(string))
urlValues.Add("ipcidr", argVal.(string))
}
argVal, argSet = d.GetOk("description")
if argSet {
urlValues.Add("desc", argVal.(string))
}
apiResp, err, _ := controller.decortAPICall("POST", apiToCall, urlValues)
if err != nil {
return err
}
d.SetId(apiResp) // update ID of the resource to tell Terraform that the ViNS resource exists
vinsId, _ := strconv.Atoi(apiResp)
log.Debugf("resourceVinsCreate: new ViNS ID / name %d / %s creation sequence complete", vinsId, d.Get("name").(string))
// We may reuse dataSourceVinsRead here as we maintain similarity
// between ViNS resource and ViNS data source schemas
// On success the read function will also update the resource ID, so that Terraform
// will know the resource exists (although we have already set it a few lines above)
return dataSourceVinsRead(d, m)
}
func resourceVinsRead(d *schema.ResourceData, m interface{}) error {
vinsFacts, err := utilityVinsCheckPresence(d, m)
if vinsFacts == "" {
// if empty string is returned from utilityVinsCheckPresence then there is no
// such ViNS and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return err
}
return flattenVins(d, vinsFacts)
}
func resourceVinsUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVinsUpdate: called for ViNS ID / name %s / %s, Account ID %d, RG ID %d",
d.Id(), d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int))
controller := m.(*ControllerCfg)
d.Partial(true)
// 1. Handle external network connection change
oldExtNetId, newExtNedId := d.GetChange("ext_net_id")
if oldExtNetId.(int) != newExtNedId.(int) {
log.Debugf("resourceVinsUpdate: changing ViNS ID %s - ext_net_id %d -> %d", d.Id(), oldExtNetId.(int), newExtNedId.(int))
extnetParams := &url.Values{}
extnetParams.Add("vinsId", d.Id())
if oldExtNetId.(int) > 0 {
// there was preexisting external net connection - disconnect ViNS
_, err, _ := controller.decortAPICall("POST", VinsExtNetDisconnectAPI, extnetParams)
if err != nil {
return err
}
}
if newExtNedId.(int) > 0 {
// new external network connection requested - connect ViNS
extnetParams.Add("netId", fmt.Sprintf("%d", newExtNedId.(int)))
_, err, _ := controller.decortAPICall("POST", VinsExtNetConnectAPI, extnetParams)
if err != nil {
return err
}
}
d.SetPartial("ext_net_id")
}
d.Partial(false)
// we may reuse dataSourceVinsRead here as we maintain similarity
// between ViNS resource and ViNS data source schemas
return dataSourceVinsRead(d, m)
}
func resourceVinsDelete(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVinsDelete: called for ViNS ID / name %s / %s, Account ID %d, RG ID %d",
d.Id(), d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int))
vinsFacts, err := utilityVinsCheckPresence(d, m)
if vinsFacts == "" {
// the specified ViNS does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
params := &url.Values{}
params.Add("vinsId", d.Id())
params.Add("force", "1") // disconnect all computes before deleting ViNS
params.Add("permanently", "1") // delete ViNS immediately bypassing recycle bin
controller := m.(*ControllerCfg)
_, err, _ = controller.decortAPICall("POST", VinsDeleteAPI, params)
if err != nil {
return err
}
return nil
}
func resourceVinsExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Debugf("resourceVinsExists: called for ViNS name %s, Account ID %d, RG ID %d",
d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int))
vinsFacts, err := utilityVinsCheckPresence(d, m)
if vinsFacts == "" {
if err != nil {
return false, err
}
return false, nil
}
return true, nil
}
func resourceVinsSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.",
},
/* we do not need ViNS ID as an argument: if the ID is already known, there is little practical value in calling the resource provider with it.
Resource Import still works, as it obtains the ID of the ViNS to be imported through a different mechanism.
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Unique ID of the ViNS. If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored.",
},
*/
"rg_id": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 0,
Description: "ID of the resource group, where this ViNS belongs to. Non-zero for ViNS created at resource group level, 0 otherwise.",
},
"account_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the account, which this ViNS belongs to. For ViNS created at account level, resource group ID is 0.",
},
"ext_net_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(0),
Description: "ID of the external network this ViNS is connected to. Pass 0 if no external connection required.",
},
"ipcidr": {
Type: schema.TypeString,
Optional: true,
DiffSuppressFunc: ipcidrDiffSupperss,
Description: "Network address to use by this ViNS. This parameter is only valid when creating new ViNS.",
},
"description": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "Optional user-defined text description of this ViNS.",
},
// the rest of attributes are computed
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account, which this ViNS belongs to.",
},
"ext_ip_addr": {
Type: schema.TypeString,
Computed: true,
Description: "IP address of the external connection (valid for ViNS connected to external network, ignored otherwise).",
},
}
return rets
}
func resourceVins() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
Create: resourceVinsCreate,
Read: resourceVinsRead,
Update: resourceVinsUpdate,
Delete: resourceVinsDelete,
Exists: resourceVinsExists,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Timeouts: &schema.ResourceTimeout{
Create: &Timeout180s,
Read: &Timeout30s,
Update: &Timeout180s,
Delete: &Timeout60s,
Default: &Timeout60s,
},
Schema: resourceVinsSchemaMake(),
}
}
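For context, a hedged sketch of how this resource would typically be registered in the provider (the actual wiring lives in provider.go, which is not part of this excerpt; the Provider() function name and the "decort_vins" / "decort_resgroup" resource type names are assumptions):

// Sketch only - assumed provider wiring; the actual provider.go may differ.
func Provider() *schema.Provider {
	return &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"decort_resgroup": resourceResgroup(),
			"decort_vins":     resourceVins(),
			// ... other resources and the provider schema omitted ...
		},
	}
}

With ImportStatePassthrough configured above, a pre-existing ViNS can then be brought under management with terraform import <resource address> <ViNS ID>, relying on the ID-based lookup added to utilityVinsCheckPresence further down in this changeset.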

View File

@@ -0,0 +1,77 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package decort
import (
// "encoding/json"
// "fmt"
// "bytes"
// log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
// This is the rules subresource of the PFW resource, used
// when creating/managing port forwarding rules for a compute connected
// to the corresponding network
func rulesSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"pub_port_start": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "Port number on the external interface. For a ranged rule it set the starting port number.",
},
"pub_port_end": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "End port number on the external interface for a ranged rule. Set it equal to start port for a single port rule.",
},
"local_port": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 65535),
Description: "Port number on the local interface.",
},
"proto": {
Type: schema.TypeString,
Required: true,
StateFunc: stateFuncToLower,
ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false),
Description: "Protocol for this rule. Could be either tcp or udp.",
},
// the rest are computed
"rule_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Rule ID as assigned by the cloud platform.",
},
}
return rets
}
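A hedged sketch of how this sub-schema typically plugs into the parent PFW resource schema, plus one plausible shape of the stateFuncToLower helper referenced above. Both are assumptions: the actual resource_pfw.go and the real helper are defined elsewhere in this changeset, and the "rule" attribute name and TypeList choice are illustrative only (compute_id and vins_id, however, are read by utilityPfwCheckPresence below).

// Sketch only - assumed parent schema for the PFW resource.
func examplePfwSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"compute_id": {
			Type:     schema.TypeInt,
			Required: true,
		},
		"vins_id": {
			Type:     schema.TypeInt,
			Required: true,
		},
		"rule": {
			Type:     schema.TypeList,
			Required: true,
			MinItems: 1,
			Elem: &schema.Resource{
				Schema: rulesSubresourceSchemaMake(),
			},
			Description: "Port forwarding rules for this compute.",
		},
	}
}

// Plausible shape of the stateFuncToLower helper used by the "proto" attribute
// (assumption - the real helper lives elsewhere in the provider; requires the "strings" import).
func stateFuncToLower(v interface{}) string {
	return strings.ToLower(v.(string))
}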

View File

@@ -44,7 +44,7 @@ func utilityAccountCheckPresence(d *schema.ResourceData, m interface{}) (string,
// get Account right away by its ID
log.Debugf("utilityAccountCheckPresence: locating Account by its ID %d", accId.(int))
urlValues.Add("accountId", fmt.Sprintf("%d", accId.(int)))
apiResp, err := controller.decortAPICall("POST", AccountsGetAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", AccountsGetAPI, urlValues)
if err != nil {
return "", err
}
@@ -57,7 +57,7 @@ func utilityAccountCheckPresence(d *schema.ResourceData, m interface{}) (string,
return "", fmt.Errorf("Cannot check account presence if name is empty and no account ID specified")
}
apiResp, err := controller.decortAPICall("POST", AccountsListAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", AccountsListAPI, urlValues)
if err != nil {
return "", err
}
@@ -131,7 +131,7 @@ func utilityGetAccountIdBySchema(d *schema.ResourceData, m interface{}) (int, er
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
apiResp, err := controller.decortAPICall("POST", AccountsListAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", AccountsListAPI, urlValues)
if err != nil {
return 0, err
}

View File

@@ -40,36 +40,29 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
// d is filled with data according to computeResource schema, so extra disks config is retrieved via "extra_disks" key
// If do_delta is true, this function will identify changes between new and existing specs for extra disks and try to
// update compute configuration accordingly
// Otherwise it will apply whatever is found in the new set of "extra_disks" right away.
// Primary use of do_delta=false is when calling this function from compute Create handler.
// Note that this function will not abort on API errors, but will continue to configure (attach / detach) other individual
// disks via atomic API calls. However, it will not retry failed manipulation on the same disk.
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %b", d.Id(), do_delta)
// NB: as of rc-1.25 "extra_disks" are TypeSet with the elem of TypeInt
old_set, new_set := d.GetChange("extra_disks")
old_disks := make([]interface{},0,0)
if old_set != nil {
old_disks = old_set.([]interface{})
}
new_disks := make([]interface{},0,0)
if new_set != nil {
new_disks = new_set.([]interface{})
}
apiErrCount := 0
var lastSavedError error
if !do_delta {
if len(new_disks) < 1 {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, disk := range new_disks {
for _, disk := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
_, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
_, err, _ := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach extra disk - partial resource update
apiErrCount++
@@ -86,58 +79,31 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
return nil
}
var attach_list, detach_list []int
match := false
for _, oDisk := range old_disks {
match = false
for _, nDisk := range new_disks {
if oDisk.(int) == nDisk.(int) {
match = true
break
}
}
if !match {
detach_list = append(detach_list, oDisk.(int))
}
}
log.Debugf("utilityComputeExtraDisksConfigure: detach list has %d items for Compute ID %s", len(detach_list), d.Id())
for _, nDisk := range new_disks {
match = false
for _, oDisk := range old_disks {
if nDisk.(int) == oDisk.(int) {
match = true
break
}
}
if !match {
attach_list = append(attach_list, nDisk.(int))
}
}
log.Debugf("utilityComputeExtraDisksConfigure: attach list has %d items for Compute ID %s", len(attach_list), d.Id())
for _, diskId := range detach_list {
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, diskId := range detach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
_, err := ctrl.decortAPICall("POST", ComputeDiskDetachAPI, urlValues)
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err, _ := ctrl.decortAPICall("POST", ComputeDiskDetachAPI, urlValues)
if err != nil {
// failed to detach disk - there will be partial resource update
log.Debugf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId, d.Id(), err)
log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
for _, diskId := range attach_list {
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, diskId := range attach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
_, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err, _ := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach disk - there will be partial resource update
log.Debugf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId, d.Id(), err)
log.Errorf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
@@ -152,39 +118,26 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
return nil
}
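The move from TypeList to TypeSet lets the attach/detach deltas be computed with native set operations instead of hand-rolled matching loops. A small illustration with hypothetical disk IDs (not taken from the changeset):

// Illustration only - hypothetical disk IDs showing how the detach/attach sets are derived.
func exampleExtraDisksDelta() {
	oldSet := schema.NewSet(schema.HashInt, []interface{}{101, 102, 103})
	newSet := schema.NewSet(schema.HashInt, []interface{}{102, 104})

	detachSet := oldSet.Difference(newSet) // disks 101 and 103 -> ComputeDiskDetachAPI
	attachSet := newSet.Difference(oldSet) // disk 104 -> ComputeDiskAttachAPI

	log.Debugf("would detach %d and attach %d extra disks", detachSet.Len(), attachSet.Len())
}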
// TODO: implement do_delta logic
func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceData, do_delta bool) error {
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
// If do_delta is true, this function will identify changes between new and existing specs for network and try to
// update compute configuration accordingly
// Otherwise it will apply whatever is found in the new set of "network" right away.
// Primary use of do_delta=false is when calling this function from compute Create handler.
/*
argVal, argSet := d.GetOk("network")
if !argSet || len(argVal.([]interface{})) < 1 {
return nil
}
net_list := argVal.([]interface{}) // network is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
*/
old_set, new_set := d.GetChange("network")
oldNets := make([]interface{},0,0)
if old_set != nil {
oldNets = old_set.([]interface{}) // network is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
}
newNets := make([]interface{},0,0)
if new_set != nil {
newNets = new_set.([]interface{}) // network is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
}
apiErrCount := 0
var lastSavedError error
if !do_delta {
for _, net := range newNets {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, runner := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
net_data := net.(map[string]interface{})
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netType", net_data["net_type"].(string))
urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
@@ -192,7 +145,7 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
if ipSet {
urlValues.Add("ipAddr", ipaddr.(string))
}
_, err := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
_, err, _ := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach network - partial resource update
apiErrCount++
@@ -208,84 +161,40 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
return nil
}
var attachList, detachList []ComputeNetMgmtRecord
match := false
for _, oRunner := range oldNets {
match = false
oSpecs := oRunner.(map[string]interface{})
for _, nRunner := range newNets {
nSpecs := nRunner.(map[string]interface{})
if oSpecs["net_id"].(int) == nSpecs["net_id"].(int) && oSpecs["net_type"].(string) == nSpecs["net_type"].(string) {
match = true
break
}
}
if !match {
newItem := ComputeNetMgmtRecord{
ID: oSpecs["net_id"].(int),
Type: oSpecs["net_type"].(string),
IPAddress: oSpecs["ip_address"].(string),
MAC: oSpecs["mac"].(string),
}
detachList = append(detachList, newItem)
}
}
log.Debugf("utilityComputeNetworksConfigure: detach list has %d items for Compute ID %s", len(detachList), d.Id())
for _, nRunner := range newNets {
match = false
nSpecs := nRunner.(map[string]interface{})
for _, oRunner := range oldNets {
oSpecs := oRunner.(map[string]interface{})
if nSpecs["net_id"].(int) == oSpecs["net_id"].(int) && nSpecs["net_type"].(string) == oSpecs["net_type"].(string) {
match = true
break
}
}
if !match {
newItem := ComputeNetMgmtRecord{
ID: nSpecs["net_id"].(int),
Type: nSpecs["net_type"].(string),
}
if nSpecs["ip_address"] != nil {
newItem.IPAddress = nSpecs["ip_address"].(string)
} else {
newItem.IPAddress = "" // make sure it is empty, if not coming from the schema
}
attachList = append(attachList, newItem)
}
}
log.Debugf("utilityComputeNetworksConfigure: attach list has %d items for Compute ID %s", len(attachList), d.Id())
for _, netRec := range detachList {
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, runner := range detach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("ipAddr", netRec.IPAddress)
urlValues.Add("mac", netRec.MAC)
_, err := ctrl.decortAPICall("POST", ComputeNetDetachAPI, urlValues)
urlValues.Add("ipAddr", net_data["ip_address"].(string))
urlValues.Add("mac", net_data["mac"].(string))
_, err, _ := ctrl.decortAPICall("POST", ComputeNetDetachAPI, urlValues)
if err != nil {
// failed to detach this network - there will be partial resource update
log.Debugf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
netRec.ID, netRec.Type, d.Id(), err)
log.Errorf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
for _, netRec := range attachList {
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, runner := range attach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netId", fmt.Sprintf("%d",netRec.ID))
urlValues.Add("netType", netRec.Type)
if netRec.IPAddress != "" {
urlValues.Add("ipAddr", netRec.IPAddress)
urlValues.Add("netId", fmt.Sprintf("%d",net_data["net_id"].(int)))
urlValues.Add("netType", net_data["net_type"].(string))
if net_data["ip_address"].(string) != "" {
urlValues.Add("ipAddr", net_data["ip_address"].(string))
}
_, err := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
_, err, _ := ctrl.decortAPICall("POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach this network - there will be partial resource update
log.Debugf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s from Compute ID %s: %s",
netRec.ID, netRec.Type, d.Id(), err)
log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
@@ -335,7 +244,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
// compute ID is specified, try to get compute instance straight by this ID
log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", theId)
urlValues.Add("computeId", fmt.Sprintf("%d", theId))
computeFacts, err := controller.decortAPICall("POST", ComputeGetAPI, urlValues)
computeFacts, err, _ := controller.decortAPICall("POST", ComputeGetAPI, urlValues)
if err != nil {
return "", err
}
@@ -355,7 +264,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
}
urlValues.Add("rgId", fmt.Sprintf("%d", rgId))
apiResp, err := controller.decortAPICall("POST", RgListComputesAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", RgListComputesAPI, urlValues)
if err != nil {
return "", err
}
@@ -377,7 +286,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
// we found the Compute we need - now get detailed information via compute/get API
cgetValues := &url.Values{}
cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))
apiResp, err = controller.decortAPICall("POST", ComputeGetAPI, cgetValues)
apiResp, err, _ = controller.decortAPICall("POST", ComputeGetAPI, cgetValues)
if err != nil {
return "", err
}

View File

@@ -75,7 +75,7 @@ func utilityDiskCheckPresence(d *schema.ResourceData, m interface{}) (string, er
// disk ID is specified, try to get disk instance straight by this ID
log.Debugf("utilityDiskCheckPresence: locating disk by its ID %d", theId)
urlValues.Add("diskId", fmt.Sprintf("%d", theId))
diskFacts, err := controller.decortAPICall("POST", DisksGetAPI, urlValues)
diskFacts, err, _ := controller.decortAPICall("POST", DisksGetAPI, urlValues)
if err != nil {
return "", err
}
@@ -98,7 +98,7 @@ func utilityDiskCheckPresence(d *schema.ResourceData, m interface{}) (string, er
}
urlValues.Add("accountId", fmt.Sprintf("%d", validatedAccountId))
diskFacts, err := controller.decortAPICall("POST", DisksListAPI, urlValues)
diskFacts, err, _ := controller.decortAPICall("POST", DisksListAPI, urlValues)
if err != nil {
return "", err
}

View File

@@ -41,7 +41,7 @@ func (controller *ControllerCfg) utilityLocationGetDefaultGridID() (int, error)
urlValues := &url.Values{}
log.Debug("utilityLocationGetDefaultGridID: retrieving locations list")
apiResp, err := controller.decortAPICall("POST", LocationsListAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", LocationsListAPI, urlValues)
if err != nil {
return 0, err
}

208
decort/utility_pfw.go Normal file
View File

@@ -0,0 +1,208 @@
/*
Copyright (c) 2020-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
func utilityPfwCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for the Terraform resource Exists method.
//
// The string returned by this method mimics the response of an imaginary API that would
// report PFW rules (if any) as follows:
// {
// "header": {
// "computeId": <int>,
// "vinsId": <int>,
// },
// "rules": [
// {
// "id": <int>,
// "localPort": <int>,
// "protocol": <int>,
// "publicPortStart": <int>,
// "publicPortEnd": <int>,
// "vmId": <int>,
// },
// {
// ...
// },
// ],
// }
// This response unmarshals into the ComputePfwListResp structure.
// NOTE: If there are no rules for this compute, an empty string is returned along with err=nil
//
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
var compId, vinsId int
// PFW resource ID is a combination of compute_id and vins_id separated by ":"
// See a note in "flattenPfw" method explaining the rationale behind this approach.
if d.Id() != "" {
log.Debugf("utilityPfwCheckPresence: setting context from PFW ID %s", d.Id())
idParts := strings.SplitN(d.Id(), ":", 2)
compId, _ = strconv.Atoi(idParts[0])
vinsId, _ = strconv.Atoi(idParts[1])
log.Debugf("utilityPfwCheckPresence: extracted Compute ID %d, ViNS %d", compId, vinsId)
if compId <= 0 || vinsId <= 0 {
return "", fmt.Errorf("Ivalid context from d.Id %s", d.Id())
}
} else {
scId, cSet := d.GetOk("compute_id")
svId, vSet := d.GetOk("vins_id")
if cSet || vSet {
log.Debugf("utilityPfwCheckPresence: setting Compute ID from schema")
compId = scId.(int)
vinsId = svId.(int)
log.Debugf("utilityPfwCheckPresence: extracted Compute ID %d, ViNS %d", compId, vinsId)
} else {
return "", fmt.Errorf("Cannot get context to check PFW rules neither from PFW ID nor from schema")
}
}
log.Debugf("utilityPfwCheckPresence: preparing to get PFW rules for Compute ID %d on ViNS ID %d", compId, vinsId)
urlValues.Add("computeId", fmt.Sprintf("%d", compId))
apiResp, err, respCode := controller.decortAPICall("POST", ComputePfwListAPI, urlValues)
if respCode == 500 {
// this is a workaround for an API 3.7.0 "feature" - it will be removed in a future version
log.Errorf("utilityPfwCheckPresence: Compute ID %d has no PFW and no connection to PFW-ready ViNS", compId)
return "", nil
}
if err != nil {
return "", err
}
pfwListResp := ComputePfwListResp{}
// Note the specifics of compute/pfwList response in API 3.7.x (this may be changed in the future):
// 1) if there are no PFW rules and compute is not connected to any PFW-able ViNS
// the response will be empty string (or HTTP error code 500)
// 2) if there are no PFW rules but compute is connected to a PFW-able ViNS
// the response will contain a list with a single element - prefix (see PfwPrefixRecord)
// 3) if there are port forwarding rules, the response will contain a list which starts
// with the prefix (see PfwPrefixRecord) followed by one or more rule records
// (see PfwRuleRecord)
//
// EXTRA NOTE: in API 3.7.0 and the likes pfwList returns HTTP response code 500 for a compute
// that is not connected to any PFW-able ViNS - need to implement temporary workaround
if apiResp == "" {
// No port forward rules defined for this compute
return "", nil
}
log.Debugf("utilityPfwCheckPresence: ready to split API response string %s", apiResp)
twoParts := strings.SplitN(apiResp, "},", 2)
if len(twoParts) < 1 || len(twoParts) > 2 {
// Case: invalid format of API response
log.Errorf("utilityPfwCheckPresence: non-empty pfwList response for compute ID %d failed to split properly", compId)
return "", fmt.Errorf("Non-empty pfwList response failed to split properly")
}
if len(twoParts) == 1 {
// Case: compute is connected to a PWF-ready ViNS but has no PFW rules defined
log.Debugf("utilityPfwCheckPresence: compute ID %d is connected to PFW-ready ViNS but has no PFW rules", compId)
return "", nil
}
// Case: compute is connected to a PFW ready ViNS and has some PFW rule
prefixResp := strings.TrimSuffix(strings.TrimPrefix(twoParts[0], "["), ",") + "}"
log.Debugf("utilityPfwCheckPresence: ready to unmarshal prefix part %s", prefixResp)
err = json.Unmarshal([]byte(prefixResp), &pfwListResp.Header)
if err != nil {
log.Errorf("utilityPfwCheckPresence: failed to unmarshal prefix part of API response: %s", err)
return "", err
}
rulesResp := "[" + twoParts[1]
log.Debugf("utilityPfwCheckPresence: ready to unmarshal rules part %s", rulesResp)
err = json.Unmarshal([]byte(rulesResp), &pfwListResp.Rules)
if err != nil {
log.Errorf("utilityPfwCheckPresence: failed to unmarshal rules part of API response: %s", err)
return "", err
}
log.Debugf("utilityPfwCheckPresence: successfully read %d port forward rules for Compute ID %d",
len(pfwListResp.Rules), compId)
if len(pfwListResp.Rules) == 0 {
// this compute technically can have rules, but no rules are currently defined
return "", nil
}
// Now we can double check that the PFWs we've got do belong to the compute & ViNS specified in the
// PFW definition. Do so by reading compute details and comparing NatableVinsID value with the
// specified ViNS ID
//
// We may reuse urlValues here, as it already contains computeId field (see initialization above)
apiResp, err, _ = controller.decortAPICall("POST", ComputeGetAPI, urlValues)
if err != nil || apiResp == "" {
log.Errorf("utilityPfwCheckPresence: failed to get Compute ID %d details", compId)
return "", err
}
compFacts := ComputeGetResp{}
err = json.Unmarshal([]byte(rulesResp), &apiResp)
if err != nil {
log.Errorf("utilityPfwCheckPresence: failed to unmarshal compute details for ID %d: %s", compId, err)
return "", err
}
if compFacts.NatableVinsID <= 0 {
log.Errorf("utilityPfwCheckPresence: compute ID %d is not connected to a NAT-able ViNS", compId)
return "", fmt.Errorf("Compute ID %d is not connected to a NAT-able ViNS", compId)
}
if compFacts.NatableVinsID != vinsId {
log.Errorf("utilityPfwCheckPresence: ViNS ID mismatch for PFW rules on compute ID %d: actual %d, required %d",
compId, compFacts.NatableVinsID, vinsId)
return "", fmt.Errorf("ViNS ID mismatch for PFW rules on compute ID %d: actual %d, required %d",
compId, compFacts.NatableVinsID, vinsId)
}
// reconstruct API response string to return
pfwListResp.Header.ComputeID = compId
pfwListResp.Header.VinsID = compFacts.NatableVinsID
reencodedItem, err := json.Marshal(pfwListResp)
if err != nil {
return "", err
}
return string(reencodedItem[:]), nil
}
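For readability, a hedged sketch of the response structures this function relies on. The concrete definitions live in the provider's models code outside this excerpt; the field names below are inferred from the JSON keys documented at the top of the function and from the Header.ComputeID / Header.VinsID usage above, so treat them as assumptions. With this shape, a raw pfwList response such as [{"computeId":100,"vinsId":5},{"id":1,...}] is split on the first "}," into the prefix part and the rules part before each part is unmarshalled.

// Assumed shape - inferred from the documented pseudo-response and the field usage above.
type PfwPrefixRecord struct {
	ComputeID int `json:"computeId"`
	VinsID    int `json:"vinsId"`
}

type PfwRuleRecord struct {
	ID              int `json:"id"`
	LocalPort       int `json:"localPort"`
	Protocol        int `json:"protocol"`
	PublicPortStart int `json:"publicPortStart"`
	PublicPortEnd   int `json:"publicPortEnd"`
	ComputeID       int `json:"vmId"`
}

type ComputePfwListResp struct {
	Header PfwPrefixRecord `json:"header"`
	Rules  []PfwRuleRecord `json:"rules"`
}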

View File

@@ -39,7 +39,7 @@ import (
func (ctrl *ControllerCfg) utilityResgroupConfigGet(rgid int) (*ResgroupGetResp, error) {
urlValues := &url.Values{}
urlValues.Add("rgId", fmt.Sprintf("%d", rgid))
rgFacts, err := ctrl.decortAPICall("POST", ResgroupGetAPI, urlValues)
rgFacts, err, _ := ctrl.decortAPICall("POST", ResgroupGetAPI, urlValues)
if err != nil {
return nil, err
}
@@ -109,7 +109,7 @@ func utilityResgroupCheckPresence(d *schema.ResourceData, m interface{}) (string
// go straight for the RG by its ID
log.Debugf("utilityResgroupCheckPresence: locating RG by its ID %d", theId)
urlValues.Add("rgId", fmt.Sprintf("%d", theId))
rgFacts, err := controller.decortAPICall("POST", ResgroupGetAPI, urlValues)
rgFacts, err, _ := controller.decortAPICall("POST", ResgroupGetAPI, urlValues)
if err != nil {
return "", err
}
@@ -130,7 +130,7 @@ func utilityResgroupCheckPresence(d *schema.ResourceData, m interface{}) (string
}
urlValues.Add("includedeleted", "false")
apiResp, err := controller.decortAPICall("POST", ResgroupListAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", ResgroupListAPI, urlValues)
if err != nil {
return "", err
}
@@ -154,7 +154,7 @@ func utilityResgroupCheckPresence(d *schema.ResourceData, m interface{}) (string
// Namely, we need resource quota settings
reqValues := &url.Values{}
reqValues.Add("rgId", fmt.Sprintf("%d", item.ID))
apiResp, err := controller.decortAPICall("POST", ResgroupGetAPI, reqValues)
apiResp, err, _ := controller.decortAPICall("POST", ResgroupGetAPI, reqValues)
if err != nil {
return "", err
}

View File

@@ -28,6 +28,7 @@ import (
"encoding/json"
"fmt"
"net/url"
"strconv"
log "github.com/sirupsen/logrus"
@@ -37,7 +38,7 @@ import (
func (ctrl *ControllerCfg) utilityVinsConfigGet(vinsid int) (*VinsRecord, error) {
urlValues := &url.Values{}
urlValues.Add("vinsId", fmt.Sprintf("%d", vinsid))
vinsFacts, err := ctrl.decortAPICall("POST", VinsGetAPI, urlValues)
vinsFacts, err, _ := ctrl.decortAPICall("POST", VinsGetAPI, urlValues)
if err != nil {
return nil, err
}
@@ -75,6 +76,34 @@ func utilityVinsCheckPresence(d *schema.ResourceData, m interface{}) (string, er
controller := m.(*ControllerCfg)
urlValues := &url.Values{}
// make it possible to use "read" & "check presence" functions with ViNS ID set so
// that Import of ViNS resource is possible
idSet := false
theId, err := strconv.Atoi(d.Id())
if err != nil || theId <= 0 {
vinsId, argSet := d.GetOk("vins_id") // NB: vins_id is NOT present in vinsResource schema!
if argSet {
theId = vinsId.(int)
idSet = true
}
} else {
idSet = true
}
if idSet {
// ViNS ID is specified, try to get the ViNS straight by this ID
log.Debugf("utilityVinsCheckPresence: locating ViNS by its ID %d", theId)
urlValues.Add("vinsId", fmt.Sprintf("%d", theId))
vinsFacts, err, _ := controller.decortAPICall("POST", VinsGetAPI, urlValues)
if err != nil {
return "", err
}
return vinsFacts, nil
}
// ID was not set in the schema upon entering this function - work through ViNS name
// and Account / RG ID
vinsName, argSet := d.GetOk("name")
if !argSet {
// if ViNS name is not set, then we cannot locate the ViNS
@@ -82,11 +111,11 @@ func utilityVinsCheckPresence(d *schema.ResourceData, m interface{}) (string, er
}
urlValues.Add("name", vinsName.(string))
urlValues.Add("show_all", "false")
log.Debugf("utilityVinsCheckPresence: locating ViNS %s", vinsName.(string))
log.Debugf("utilityVinsCheckPresence: preparing to locate ViNS name %s", vinsName.(string))
rgId, rgSet := d.GetOk("rg_id")
if rgSet {
log.Debugf("utilityVinsCheckPresence: limiting ViNS t search to RG ID %d", rgId.(int))
log.Debugf("utilityVinsCheckPresence: limiting ViNS search to RG ID %d", rgId.(int))
urlValues.Add("rgId", fmt.Sprintf("%d", rgId.(int)))
}
@@ -96,7 +125,7 @@ func utilityVinsCheckPresence(d *schema.ResourceData, m interface{}) (string, er
urlValues.Add("accountId", fmt.Sprintf("%d", accountId.(int)))
}
apiResp, err := controller.decortAPICall("POST", VinsSearchAPI, urlValues)
apiResp, err, _ := controller.decortAPICall("POST", VinsSearchAPI, urlValues)
if err != nil {
return "", err
}
@@ -125,7 +154,7 @@ func utilityVinsCheckPresence(d *schema.ResourceData, m interface{}) (string, er
// manage ViNS, so we have to get detailed info by calling API vins/get
rqValues := &url.Values{}
rqValues.Add("vinsId", fmt.Sprintf("%d",item.ID))
vinsGetResp, err := controller.decortAPICall("POST", VinsGetAPI, rqValues)
vinsGetResp, err, _ := controller.decortAPICall("POST", VinsGetAPI, rqValues)
if err != nil {
return "", err
}
@@ -133,5 +162,5 @@ func utilityVinsCheckPresence(d *schema.ResourceData, m interface{}) (string, er
}
}
return "", fmt.Errorf("Cannot find ViNS name %s. Check name and/or RG ID & Account ID", vinsName.(string))
return "", fmt.Errorf("Cannot find ViNS name %s. Check name and/or RG ID & Account ID and your access rights", vinsName.(string))
}