Debug Compute and Disk resource logic

rc-1.0
Sergey Shubin svs1370 4 years ago
parent 7d589e3276
commit 9814df8700

@ -30,13 +30,14 @@ import (
"crypto/tls"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
// "time"
log "github.com/sirupsen/logrus"
"github.com/dgrijalva/jwt-go"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
@ -135,7 +136,7 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
}
if allow_unverified_ssl {
log.Printf("ControllerConfigure: allow_unverified_ssl is set - will not check certificates!")
log.Warn("ControllerConfigure: allow_unverified_ssl is set - will not check certificates!")
transCfg := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true},}
ret_config.cc_client = &http.Client{
Transport: transCfg,
@ -389,7 +390,7 @@ func (config *ControllerCfg) decortAPICall(method string, api_name string, url_v
return "", err
}
json_resp := Jo2JSON(string(tmp_body))
log.Printf("decortAPICall:\n %s", json_resp)
log.Debugf("decortAPICall: %s %s\n %s", method, api_name, json_resp)
return json_resp, nil
} else {
return "", fmt.Errorf("decortAPICall: unexpected status code %d when calling API %q with request Body %q",

@ -49,7 +49,7 @@ func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
length = 0
}
result := make([]interface{}, length)
result := make([]interface{}, length-1)
if length == 0 {
return result
@ -139,7 +139,7 @@ func parseBootDiskSize(disks []DiskRecord) int {
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
// return value will be used to d.Set("networks",) item of dataSourceCompute schema
// return value will be used to d.Set("network",) item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
@ -222,7 +222,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("compute_id", model.ID)
// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't
d.Set("name", model.Name)
d.Set("rg_id", model.RgID)
d.Set("rg_name", model.RgName)
@ -247,7 +247,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
if len(model.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
if err = d.Set("networks", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
return err
}
}
@ -367,7 +367,7 @@ func dataSourceCompute() *schema.Resource {
Elem: &schema.Schema {
Type: schema.TypeInt,
},
Description: "IDs of the extra disks attached to this compute.",
Description: "IDs of the extra disk(s) attached to this compute.",
},
/*
@ -381,14 +381,14 @@ func dataSourceCompute() *schema.Resource {
},
*/
"networks": {
"network": {
Type: schema.TypeList,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
Schema: networkSubresourceSchemaMake(),
},
Description: "Networks this compute is attached to.",
Description: "Network connection(s) for this compute.",
},
/*

@ -40,8 +40,11 @@ func flattenDisk(d *schema.ResourceData, disk_facts string) error {
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceDiskExists(...) method. Use utilityDiskCheckPresence instead.
log.Debugf("flattenDisk: ready to unmarshal string %s", disk_facts)
model := DiskRecord{}
log.Debugf("flattenDisk: ready to unmarshal string %q", disk_facts)
err := json.Unmarshal([]byte(disk_facts), &model)
if err != nil {
return err
@ -50,7 +53,7 @@ func flattenDisk(d *schema.ResourceData, disk_facts string) error {
log.Debugf("flattenDisk: disk ID %d, disk AccountID %d", model.ID, model.AccountID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("disk_id", model.ID)
// d.Set("disk_id", model.ID) - we should NOT update disk_id in the schema. If it was set - it is already set, if it wasn't - we shouldn't
d.Set("name", model.Name)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)

@ -62,7 +62,7 @@ func dataSourceImageRead(d *schema.ResourceData, m interface{}) error {
for index, item := range model {
// need to match Image by name
if item.Name == name {
log.Printf("dataSourceImageRead: index %d, matched name %s", index, item.Name)
log.Debugf("dataSourceImageRead: index %d, matched name %s", index, item.Name)
d.SetId(fmt.Sprintf("%d", item.ID))
d.Set("account_id", item.AccountID)
d.Set("arch", item.Arch)

@ -143,7 +143,7 @@ func flattenNetworks(nets []NicRecord) []interface{} {
length += 1
}
}
log.Printf("flattenNetworks: found %d NICs with PUBLIC type", length)
log.Debugf("flattenNetworks: found %d NICs with PUBLIC type", length)
result := make([]interface{}, length)
if length == 0 {
@ -163,7 +163,7 @@ func flattenNetworks(nets []NicRecord) []interface{} {
elem["network_id"], _ = strconv.Atoi(substr[1])
elem["ip_range"] = value.IPAddress
// elem["label"] = ... - should be uncommented for the future release
log.Printf("flattenNetworks: parsed element %d - network_id %d, ip_range %q",
log.Debugf("flattenNetworks: parsed element %d - network_id %d, ip_range %s",
index, elem["network_id"].(int), value.IPAddress)
result[subindex] = elem
subindex += 1

@ -31,7 +31,7 @@ import (
func makeQuotaRecord(arg_list []interface{}) (QuotaRecord, int) {
quota := QuotaRecord{
Cpu: -1,
Ram: -1.,
Ram: -1., // this is float32, but may change in the future
Disk: -1,
ExtTraffic: -1,
ExtIPs: -1,
@ -48,7 +48,7 @@ func makeQuotaRecord(arg_list []interface{}) (QuotaRecord, int) {
}
if subres_data["ram"].(int) > 0 {
quota.Ram = subres_data["ram"].(float32) // RAM volume in MB, as float!
quota.Ram = subres_data["ram"].(float32) // RAM volume in MB, as float32!
}
if subres_data["ext_traffic"].(int) > 0 {
@ -70,7 +70,7 @@ func parseQuota(quota QuotaRecord) []interface{} {
quota_map := make(map[string]interface{})
quota_map["cpu"] = quota.Cpu
quota_map["ram"] = quota.Ram // MB; this is float32, unlike the rest of values
quota_map["ram"] = quota.Ram // NB: this is float32, unlike the rest of values
quota_map["disk"] = quota.Disk
quota_map["ext_traffic"] = quota.ExtTraffic
quota_map["ext_ips"] = quota.ExtIPs

@ -96,7 +96,7 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
d.SetPartial("ssh_keys")
}
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %q created", compId, d.Get("name").(string))
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string))
// Configure data disks if any
extraDisksOk := true
@ -116,7 +116,7 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
// Configure external networks if any
netsOk := true
argVal, argSet = d.GetOk("networks")
argVal, argSet = d.GetOk("network")
if argSet && len(argVal.([]interface{})) > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", len(argVal.([]interface{})))
err = controller.utilityComputeNetworksConfigure(d, false) // do_delta=false, as we are working on a new compute
@ -127,7 +127,7 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
}
if netsOk {
// there were no errors reported when configuring networks
d.SetPartial("networks")
d.SetPartial("network")
}
if extraDisksOk && netsOk {
@ -135,7 +135,7 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
d.Partial(false)
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %q creation sequence complete", compId, d.Get("name").(string))
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
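
The hunks above sit inside the SDK's partial-state flow: attributes such as ssh_keys and network are marked with d.SetPartial as they are configured, and d.Partial(false) is called only when both the disk and network steps succeed. A minimal sketch of that flow, assuming terraform-plugin-sdk v1 helper/schema where SetPartial is still available; the configure helpers below are stand-in stubs, not the provider's real functions.

package example

import (
    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// Stand-in helpers; the real provider calls utilityComputeExtraDisksConfigure etc.
func configureSSHKeys(d *schema.ResourceData) error  { return nil }
func configureNetworks(d *schema.ResourceData) error { return nil }

func resourceExampleCreate(d *schema.ResourceData, m interface{}) error {
    d.Partial(true) // until Partial(false), only attributes marked via SetPartial are saved

    if configureSSHKeys(d) == nil {
        d.SetPartial("ssh_keys")
    }

    netsOk := configureNetworks(d) == nil
    if netsOk {
        d.SetPartial("network")
    }

    if netsOk {
        d.Partial(false) // everything succeeded, persist full state from here on
    }
    return nil
}
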
@ -145,7 +145,7 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
}
func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceComputeRead: called for Compute name %q, RG ID %d",
log.Debugf("resourceComputeRead: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
@ -168,7 +168,7 @@ func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
}
func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceComputeUpdate: called for Compute name %q, RGID %d",
log.Debugf("resourceComputeUpdate: called for Compute name %s, RGID %d",
d.Get("name").(string), d.Get("rg_id").(int))
log.Printf("resourceComputeUpdate: NOT IMPLEMENTED YET!")
@ -182,7 +182,7 @@ func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this function destroys target Compute instance "permanently", so
// there is no way to restore it. It also destroys all extra disks
// attached to this compute, so "User, ye be warned!"
log.Debugf("resourceComputeDelete: called for Compute name %q, RG ID %d",
log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(d, m)
@ -288,7 +288,7 @@ func resourceCompute() *schema.Resource {
"boot_disk_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Size of the boot disk on this compute instance.",
Description: "This compute instance boot disk size in GB.",
},
"extra_disks": {
@ -298,17 +298,17 @@ func resourceCompute() *schema.Resource {
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Optional list of IDs of the extra disks to attach to this compute.",
Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.",
},
"networks": {
"network": {
Type: schema.TypeList,
Optional: true,
MaxItems: MaxNetworksPerCompute,
Elem: &schema.Resource{
Schema: networkSubresourceSchemaMake(),
},
Description: "Optional list of networks to attach this compute to.",
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"ssh_keys": {

@ -45,14 +45,14 @@ func resourceDiskCreate(d *schema.ResourceData, m interface{}) error {
urlValues.Add("accountId", fmt.Sprintf("%d", d.Get("account_id").(int)))
urlValues.Add("gid", fmt.Sprintf("%d", DefaultGridID)) // we use default Grid ID, which was obtained along with DECORT Controller init
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("size", d.Get("size").(string))
urlValues.Add("size", fmt.Sprintf("%d", d.Get("size").(int)))
urlValues.Add("type", d.Get("type").(string))
urlValues.Add("sep_id", fmt.Sprintf("%d", d.Get("sep_id").(int)))
urlValues.Add("pool", d.Get("pool").(string))
argVal, argSet := d.GetOk("description")
if argSet {
urlValues.Add("decs", argVal.(string))
urlValues.Add("description", argVal.(string))
}
apiResp, err := controller.decortAPICall("POST", DisksCreateAPI, urlValues)
@ -212,6 +212,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
"description": {
Type: schema.TypeString,
Optional: true,
Default: "Disk resource managed by Terraform",
Description: "Optional user-defined text description of this disk.",
},
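
Two fixes in this file concern how the disks/create request parameters are serialized: numeric schema values must be rendered into strings before they go into url.Values, and the description is now sent under the "description" key instead of "decs". A minimal sketch of that query-building step; the parameter names mirror the hunk above, while the function signature and sample values are illustrative only.

package main

import (
    "fmt"
    "net/url"
)

// buildDiskCreateParams mirrors the url.Values construction above: ints are
// rendered with fmt.Sprintf("%d", ...) because url.Values only holds strings.
func buildDiskCreateParams(accountID, sizeGB, sepID int, name, diskType, pool, description string) *url.Values {
    urlValues := &url.Values{}
    urlValues.Add("accountId", fmt.Sprintf("%d", accountID))
    urlValues.Add("name", name)
    urlValues.Add("size", fmt.Sprintf("%d", sizeGB)) // was passed through a string type assertion before the fix
    urlValues.Add("type", diskType)
    urlValues.Add("sep_id", fmt.Sprintf("%d", sepID))
    urlValues.Add("pool", pool)
    if description != "" {
        urlValues.Add("description", description) // key renamed from "decs" by this commit
    }
    return urlValues
}

func main() {
    fmt.Println(buildDiskCreateParams(1, 10, 2001, "data-disk-01", "D", "default", "managed by Terraform").Encode())
}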

@ -43,17 +43,24 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
// Note that this function will not abort on API errors, but will continue to configure (attach / detach) other individual
// disks via atomic API calls. However, it will not retry failed manipulation on the same disk.
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %b", d.Id(), do_delta)
old_set, new_set := d.GetChange("extra_disks")
old_disks := old_set.([]interface{}) // NB: "extra_disks" is an array of ints
new_disks := new_set.([]interface{})
old_disks := make([]interface{},0,0)
if old_set != nil {
old_disks = old_set.([]interface{})
}
new_disks := make([]interface{},0,0)
if new_set != nil {
new_disks = new_set.([]interface{})
}
apiErrCount := 0
var lastSavedError error
if !do_delta {
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = false", d.Id())
if len(new_disks) < 1 {
return nil
}
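
The lines added above guard the two values coming back from d.GetChange("extra_disks") before they are type-asserted, so a nil change set no longer panics. The same pattern as a compact helper; a sketch assuming SDK v1 helper/schema, with the attribute name taken from the hunk.

package example

import (
    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// extraDisksDelta returns the old and new "extra_disks" lists, tolerating
// nil values from GetChange instead of panicking on the type assertion.
func extraDisksDelta(d *schema.ResourceData) (oldDisks, newDisks []interface{}) {
    oldSet, newSet := d.GetChange("extra_disks")

    oldDisks = make([]interface{}, 0)
    if oldSet != nil {
        oldDisks = oldSet.([]interface{})
    }

    newDisks = make([]interface{}, 0)
    if newSet != nil {
        newDisks = newSet.([]interface{})
    }
    return oldDisks, newDisks
}
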
@ -152,15 +159,15 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
// TODO: implement do_delta logic
func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceData, do_delta bool) error {
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "networks" key
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
// If do_delta is true, this function will identify changes between new and existing specs for network and try to
// update compute configuration accordingly
argVal, argSet := d.GetOk("networks")
argVal, argSet := d.GetOk("network")
if !argSet || len(argVal.([]interface{})) < 1 {
return nil
}
net_list := argVal.([]interface{}) // networks" is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
net_list := argVal.([]interface{}) // network is an array of maps; for keys see func networkSubresourceSchemaMake() definition
for _, net := range net_list {
urlValues := &url.Values{}
@ -241,7 +248,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
return "", err
}
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %q", apiResp)
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp)
computeList := RgListComputesResp{}
err = json.Unmarshal([]byte(apiResp), &computeList)
@ -254,7 +261,7 @@ func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string,
for index, item := range computeList {
// need to match Compute by name, skip Computes with the same name in DESTROYED status
if item.Name == computeName.(string) && item.Status != "DESTROYED" {
log.Debugf("utilityComputeCheckPresence: index %d, matched name %q", index, item.Name)
log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name)
// we found the Compute we need - now get detailed information via compute/get API
cgetValues := &url.Values{}
cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))

@ -116,7 +116,7 @@ func utilityDiskCheckPresence(d *schema.ResourceData, m interface{}) (string, er
for index, item := range disksList {
// need to match disk by name, return the first match
if item.Name == diskName.(string) && item.Status != "DESTROYED" {
log.Printf("utilityDiskCheckPresence: index %d, matched disk name %q", index, item.Name)
log.Debugf("utilityDiskCheckPresence: index %d, matched disk name %q", index, item.Name)
// we found the disk we need - now get detailed information via API call to disks/get
/*
// TODO: this may not be optimal as it initiates one extra call to the DECORT controller

@ -31,7 +31,7 @@ import (
)
func Jo2JSON(arg_str string) string {
// DECS API historically returns response in the form of Python dictionary, which generally
// DECORT API historically returns response in the form of Python dictionary, which generally
// looks like JSON, but does not comply with JSON syntax.
// For Golang JSON Unmarshal to work properly we need to pre-process API response as follows:
ret_string := strings.Replace(string(arg_str), "u'", "\"", -1)
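
Jo2JSON exists because the controller historically returns Python-repr style output rather than strict JSON. Only the u' replacement is visible in the hunk above; the sketch below illustrates the general idea of rewriting such a payload before json.Unmarshal. The extra replacements are assumptions, not the provider's exact rule set.

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

// pyDictToJSON is an illustrative pre-processor: it rewrites a Python-repr
// style payload into something encoding/json can parse. The replacement list
// is an assumption; the provider's Jo2JSON may differ in details.
func pyDictToJSON(argStr string) (map[string]interface{}, error) {
    s := strings.Replace(argStr, "u'", "\"", -1) // unicode string prefix, as in the line above
    s = strings.Replace(s, "'", "\"", -1)        // remaining single quotes (assumption)
    s = strings.Replace(s, "True", "true", -1)   // Python booleans (assumption)
    s = strings.Replace(s, "False", "false", -1) // Python booleans (assumption)

    parsed := map[string]interface{}{}
    err := json.Unmarshal([]byte(s), &parsed)
    return parsed, err
}

func main() {
    parsed, err := pyDictToJSON("{u'status': u'ENABLED', u'disks': [100, 101]}")
    fmt.Println(parsed, err)
}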

@ -121,7 +121,7 @@ func utilityResgroupCheckPresence(d *schema.ResourceData, m interface{}) (string
return "", err
}
// log.Debugf("%s", apiResp)
// log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %q", ResgroupListAPI)
log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %s", ResgroupListAPI)
model := ResgroupListResp{}
err = json.Unmarshal([]byte(apiResp), &model)
if err != nil {
