Major rework of logic around compute resource and its subresources

rc-1.0
Sergey Shubin svs1370 4 years ago
parent 4f617334be
commit ce84733848

@ -0,0 +1,298 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
// the standard "log" package has no leveled Debugf; a leveled logger is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation" // not referenced in this data source
)
func parseComputeDisks(disks []DiskRecord) []interface{} {
length := len(disks)
log.Debugf("parseComputeDisks: called for %d disks", length)
result := make([]interface{}, length)
if length == 0 {
return result
}
for i, value := range disks {
// allocate a fresh map per element so that list items do not share the same underlying map
elem := make(map[string]interface{})
// keys in this map should correspond to the Schema definition
// as returned by dataSourceDiskSchemaMake()
// elem["<attribute>"] = value.<Attribute>
// ...
result[i] = elem
}
return result // this result will be used to d.Set("disks",) item of dataSourceCompute schema
}
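// A sketch of what the elided per-attribute assignments above could look like. The keys follow
// dataSourceDiskSchemaMake() and the DiskRecord field names follow flattenDisk() in this same
// commit; the exact set kept per disk is an editorial assumption, not the committed code. The
// same pattern applies to parseComputeInterfaces() below.
/*
elem["disk_id"] = value.ID
elem["name"] = value.Name
elem["size"] = value.SizeMax
elem["type"] = value.Type
elem["sep_id"] = value.SepID
elem["sep_type"] = value.SepType
elem["pool"] = value.Pool
elem["compute_id"] = value.ComputeID
elem["image_id"] = value.ImageID
elem["account_id"] = value.AccountID
elem["status"] = value.Status
elem["tech_status"] = value.TechStatus
*/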
func parseComputeInterfaces(ifaces []InterfaceRecord) []interface{} {
length := len(ifaces)
log.Debugf("parseComputeInterfaces: called for %d ifaces", length)
result := make([]interface{}, length)
if length == 0 {
return result
}
for i, value := range ifaces {
// allocate a fresh map per element so that list items do not share the same underlying map
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by dataSourceInterfaceSchemaMake()
// elem["<attribute>"] = value.<Attribute>
// ...
result[i] = elem
}
return result // this result will be used to d.Set("interfaces",) item of dataSourceCompute schema
}
func flattenCompute(d *schema.ResourceData, comp_facts string) error {
// This function expects that comp_facts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
model := ComputeGetResp{}
log.Debugf("flattenCompute: ready to unmarshal string %q", comp_facts)
err := json.Unmarshal([]byte(comp_facts), &model)
if err != nil {
return err
}
log.Debugf("flattenCompute: model.ID %d, model.ResGroupID %d", model.ID, model.ResGroupID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("compute_id", model.ID)
d.Set("name", model.Name)
d.Set("rg_id", model.ResGroupID)
d.Set("rg_name", model.ResGroupName)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)
d.Set("arch", model.Arch)
d.Set("cpu", model.Cpu)
d.Set("ram", model.Ram)
d.Set("boot_disk_size", model.BootDiskSize)
d.Set("image_id", model.ImageID)
d.Set("description", model.Desc)
d.Set("status", model.Status)
d.Set("tech_status", model.TechStatus)
if len(model.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisks for %d disks", len(model.Disks))
if err = d.Set("disks", parseComputeDisks(model.Disks)); err != nil {
return err
}
}
if len(model.Interfaces) > 0 {
log.Printf("flattenCompute: calling parseComputeInterfaces for %d interfaces", len(model.Interfaces))
if err = d.Set("interfaces", parseComputeInterfaces(model.Interfaces)); err != nil {
return err
}
}
if len(model.GuestLogins) > 0 {
log.Printf("flattenCompute: calling parseGuestLogins")
guest_logins := parseGuestLogins(model.GuestLogins)
if err = d.Set("guest_logins", guest_logins); err != nil {
return err
}
default_login := guest_logins[0].(map[string]interface{})
// set user & password attributes to the corresponding values of the 1st item in the list
if err = d.Set("user", default_login["login"]); err != nil {
return err
}
if err = d.Set("password", default_login["password"]); err != nil {
return err
}
}
return nil
}
func dataSourceComputeRead(d *schema.ResourceData, m interface{}) error {
comp_facts, err := utilityComputeCheckPresence(d, m)
if comp_facts == "" {
// if empty string is returned from utilityComputeCheckPresence then there is no
// such Compute and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return err
}
return flattenCompute(d, comp_facts)
}
func dataSourceCompute() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Read: dataSourceComputeRead,
Timeouts: &schema.ResourceTimeout {
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored."
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the resource group where this compute instance is located.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource group where this compute instance is located.",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account this compute instance belongs to.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account this compute instance belongs to.",
},
"arch": {
Type: schema.TypeString,
Computed: true,
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of CPUs allocated for this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Computed: true,
Description: "Amount of RAM in MB allocated for this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the OS image this compute instance is based on.",
},
"image_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the OS image this compute instance is based on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk size in GB.",
},
"disks": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
},
Description: "Detailed specification for all disks attached to this compute instance (including bood disk).",
},
"guest_logins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: guestLoginsSubresourceSchema(),
},
Description: "Details about the guest OS users provisioned together with this compute instance.",
},
"interfaces": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: interfaceSubresourceSchema(),
},
Description: "Specification for the virtual NICs configured on this compute instance.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this compute instance.",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current model status of this compute instance.",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Current technical status of this compute instance.",
},
/*
"internal_ip": {
Type: schema.TypeString,
Computed: true,
Description: "Internal IP address of this Compute.",
},
*/
},
}
}
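// The Timeout30s, Timeout60s and Timeout180s values referenced in the Timeouts blocks are assumed
// to be package-level time.Duration variables defined elsewhere in the provider; a minimal sketch
// under that assumption (illustration only, not part of this commit):
/*
import "time"

var (
Timeout30s  = 30 * time.Second
Timeout60s  = 60 * time.Second
Timeout180s = 180 * time.Second
)
*/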

@ -0,0 +1,485 @@
/*
Copyright (c) 2019-2020 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"log"
"net/url"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
machine := &MachineConfig{
ResGroupID: d.Get("rgid").(int),
Name: d.Get("name").(string),
Cpu: d.Get("cpu").(int),
Ram: d.Get("ram").(int),
ImageID: d.Get("image_id").(int),
Description: d.Get("description").(string),
}
// BootDisk
// DataDisks
// Networks
// PortForwards
// SshKeyData string
log.Printf("resourceComputeCreate: called for VM name %q, ResGroupID %d", machine.Name, machine.ResGroupID)
var subres_list []interface{}
var subres_data map[string]interface{}
var arg_value interface{}
var arg_set bool
// boot disk list is a required argument and has only one element,
// which is of type diskSubresourceSchema
subres_list = d.Get("boot_disk").([]interface{})
subres_data = subres_list[0].(map[string]interface{})
machine.BootDisk.Label = subres_data["label"].(string)
machine.BootDisk.Size = subres_data["size"].(int)
machine.BootDisk.Pool = subres_data["pool"].(string)
machine.BootDisk.Provider = subres_data["provider"].(string)
arg_value, arg_set = d.GetOk("data_disks")
if arg_set {
log.Printf("resourceComputeCreate: calling makeDisksConfig")
machine.DataDisks, _ = makeDisksConfig(arg_value.([]interface{}))
}
arg_value, arg_set = d.GetOk("networks")
if arg_set {
log.Printf("resourceComputeCreate: calling makeNetworksConfig")
machine.Networks, _ = makeNetworksConfig(arg_value.([]interface{}))
}
arg_value, arg_set = d.GetOk("port_forwards")
if arg_set {
log.Printf("resourceComputeCreate: calling makePortforwardsConfig")
machine.PortForwards, _ = makePortforwardsConfig(arg_value.([]interface{}))
}
arg_value, arg_set = d.GetOk("ssh_keys")
if arg_set {
log.Printf("resourceComputeCreate: calling makeSshKeysConfig")
machine.SshKeys, _ = makeSshKeysConfig(arg_value.([]interface{}))
}
// create basic VM (i.e. without port forwards and ext network connections - those will be done
// by separate API calls)
d.Partial(true)
controller := m.(*ControllerCfg)
url_values := &url.Values{}
url_values.Add("cloudspaceId", fmt.Sprintf("%d", machine.ResGroupID))
url_values.Add("name", machine.Name)
url_values.Add("description", machine.Description)
url_values.Add("vcpus", fmt.Sprintf("%d", machine.Cpu))
url_values.Add("memory", fmt.Sprintf("%d", machine.Ram))
url_values.Add("imageId", fmt.Sprintf("%d", machine.ImageID))
url_values.Add("disksize", fmt.Sprintf("%d", machine.BootDisk.Size))
if len(machine.SshKeys) > 0 {
url_values.Add("userdata", makeSshKeysArgString(machine.SshKeys))
}
api_resp, err := controller.decortAPICall("POST", MachineCreateAPI, url_values)
if err != nil {
return err
}
d.SetId(api_resp) // machines/create API plainly returns ID of the new VM on success
machine.ID, _ = strconv.Atoi(api_resp)
d.SetPartial("name")
d.SetPartial("description")
d.SetPartial("cpu")
d.SetPartial("ram")
d.SetPartial("image_id")
d.SetPartial("boot_disk")
if len(machine.SshKeys) > 0 {
d.SetPartial("ssh_keys")
}
log.Printf("resourceComputeCreate: new VM ID %d, name %q created", machine.ID, machine.Name)
if len(machine.DataDisks) > 0 || len(machine.PortForwards) > 0 {
// for data disk or port forwards provisioning we have to know Tenant ID
// and Grid ID so we call utilityResgroupConfigGet method to populate these
// fields in the machine structure that will be passed to provisionVmDisks or
// provisionVmPortforwards
log.Printf("resourceComputeCreate: calling utilityResgroupConfigGet")
resgroup, err := controller.utilityResgroupConfigGet(machine.ResGroupID)
if err == nil {
machine.TenantID = resgroup.TenantID
machine.GridID = resgroup.GridID
machine.ExtIP = resgroup.ExtIP
log.Printf("resourceComputeCreate: tenant ID %d, GridID %d, ExtIP %q",
machine.TenantID, machine.GridID, machine.ExtIP)
}
}
//
// Configure data disks
disks_ok := true
if len(machine.DataDisks) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmDisksProvision for disk count %d", len(machine.DataDisks))
if machine.TenantID == 0 {
// if TenantID is still 0 it means that we failed to get Resgroup Facts by
// a previous call to utilityResgroupGetFacts,
// hence we do not have technical ability to provision data disks
disks_ok = false
} else {
// provisionVmDisks accomplishes two steps for each data disk specification
// 1) creates the disks
// 2) attaches them to the VM
err = controller.utilityVmDisksProvision(machine)
if err != nil {
disks_ok = false
}
}
}
if disks_ok {
d.SetPartial("data_disks")
}
//
// Configure port forward rules
pfws_ok := true
if len(machine.PortForwards) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmPortforwardsProvision for pfw rules count %d", len(machine.PortForwards))
if machine.ExtIP == "" {
// if ExtIP is still empty it means that we failed to get Resgroup Facts by
// a previous call to utilityResgroupGetFacts,
// hence we do not have technical ability to provision port forwards
pfws_ok = false
} else {
err := controller.utilityVmPortforwardsProvision(machine)
if err != nil {
pfws_ok = false
}
}
}
if pfws_ok {
// there were no errors reported when configuring port forwards
d.SetPartial("port_forwards")
}
//
// Configure external networks
// NOTE: currently only one external network can be attached to each VM, so in the current
// implementation we ignore all but the 1st network definition
nets_ok := true
if len(machine.Networks) > 0 {
log.Printf("resourceComputeCreate: calling utilityVmNetworksProvision for networks count %d", len(machine.Networks))
err := controller.utilityVmNetworksProvision(machine)
if err != nil {
nets_ok = false
}
}
if nets_ok {
// there were no errors reported when configuring networks
d.SetPartial("networks")
}
if ( disks_ok && nets_ok && pfws_ok ) {
// if there were no errors in setting any of the subresources, we may leave Partial mode
d.Partial(false)
}
// resourceComputeRead will also update resource ID on success, so that Terraform will know
// that resource exists
return resourceComputeRead(d, m)
}
func resourceComputeRead(d *schema.ResourceData, m interface{}) error {
log.Printf("resourceComputeRead: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
comp_facts, err := utilityComputeCheckPresence(d, m)
if comp_facts == "" {
if err != nil {
return err
}
// VM was not found
return nil
}
if err = flattenCompute(d, comp_facts); err != nil {
return err
}
log.Printf("resourceComputeRead: after flattenCompute: VM ID %s, VM name %q, ResGroupID %d",
d.Id(), d.Get("name").(string), d.Get("rgid").(int))
// Not all parameters, that we may need, are returned by machines/get API
// Continue with further reading of VM subresource parameters:
controller := m.(*ControllerCfg)
url_values := &url.Values{}
/*
// Obtain information on external networks
url_values.Add("machineId", d.Id())
body_string, err := controller.decortAPICall("POST", VmExtNetworksListAPI, url_values)
if err != nil {
return err
}
net_list := ExtNetworksResp{}
err = json.Unmarshal([]byte(body_string), &net_list)
if err != nil {
return err
}
if len(net_list) > 0 {
if err = d.Set("networks", flattenNetworks(net_list)); err != nil {
return err
}
}
*/
/*
// Ext networks flattening is now done inside flattenCompute because it is currently based
// on data read into NICs component by machine/get API call
if err = d.Set("networks", flattenNetworks()); err != nil {
return err
}
*/
//
// Obtain information on port forwards
url_values.Add("cloudspaceId", fmt.Sprintf("%d",d.Get("rgid")))
url_values.Add("machineId", d.Id())
pfw_list := PortforwardsResp{}
body_string, err := controller.decortAPICall("POST", PortforwardsListAPI, url_values)
if err != nil {
return err
}
err = json.Unmarshal([]byte(body_string), &pfw_list)
if err != nil {
return err
}
if len(pfw_list) > 0 {
if err = d.Set("port_forwards", flattenPortforwards(pfw_list)); err != nil {
return err
}
}
return nil
}
func resourceComputeUpdate(d *schema.ResourceData, m interface{}) error {
log.Printf("resourceComputeUpdate: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
return resourceComputeRead(d, m)
}
func resourceComputeDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this method destroys target VM with flag "permanently", so there is no way to
// restore destroyed VM
log.Printf("resourceComputeDelete: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
comp_facts, err := utilityComputeCheckPresence(d, m)
if comp_facts == "" {
// the target VM does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
params := &url.Values{}
params.Add("machineId", d.Id())
params.Add("permanently", "true")
controller := m.(*ControllerCfg)
comp_facts, err = controller.decortAPICall("POST", MachineDeleteAPI, params)
if err != nil {
return err
}
return nil
}
func resourceComputeExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Printf("resourceComputeExist: called for VM name %q, ResGroupID %d",
d.Get("name").(string), d.Get("rgid").(int))
comp_facts, err := utilityComputeCheckPresence(d, m)
if comp_facts == "" {
if err != nil {
return false, err
}
return false, nil
}
return true, nil
}
func resourceCompute() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Create: resourceComputeCreate,
Read: resourceComputeRead,
Update: resourceComputeUpdate,
Delete: resourceComputeDelete,
Exists: resourceComputeExists,
Timeouts: &schema.ResourceTimeout {
Create: &Timeout180s,
Read: &Timeout30s,
Update: &Timeout180s,
Delete: &Timeout60s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of this virtual machine. This parameter is case sensitive.",
},
"rgid": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the resource group where this virtual machine should be deployed.",
},
"cpu": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, 64),
Description: "Number of CPUs to allocate to this virtual machine.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(512),
Description: "Amount of RAM in MB to allocate to this virtual machine.",
},
"image_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of the OS image to base this virtual machine on.",
},
"boot_disk": {
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Elem: &schema.Resource {
Schema: diskSubresourceSchema(),
},
Description: "Specification for a boot disk on this virtual machine.",
},
"data_disks": {
Type: schema.TypeList,
Optional: true,
MaxItems: 12,
Elem: &schema.Resource {
Schema: diskSubresourceSchema(),
},
Description: "Specification for data disks on this virtual machine.",
},
"guest_logins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: loginsSubresourceSchema(),
},
Description: "Specification for guest logins on this virtual machine.",
},
"networks": {
Type: schema.TypeList,
Optional: true,
MaxItems: 8,
Elem: &schema.Resource {
Schema: networkSubresourceSchema(),
},
Description: "Specification for the networks to connect this virtual machine to.",
},
"nics": {
Type: schema.TypeList,
Computed: true,
MaxItems: 8,
Elem: &schema.Resource {
Schema: nicSubresourceSchema(),
},
Description: "Specification for the virutal NICs allocated to this virtual machine.",
},
"ssh_keys": {
Type: schema.TypeList,
Optional: true,
MaxItems: 12,
Elem: &schema.Resource {
Schema: sshSubresourceSchema(),
},
Description: "SSH keys to authorize on this virtual machine.",
},
"port_forwards": {
Type: schema.TypeList,
Optional: true,
MaxItems: 12,
Elem: &schema.Resource {
Schema: portforwardSubresourceSchema(),
},
Description: "Specification for the port forwards to configure for this virtual machine.",
},
"description": {
Type: schema.TypeString,
Optional: true,
Description: "Description of this virtual machine.",
},
"user": {
Type: schema.TypeString,
Computed: true,
Description: "Default login name for the guest OS on this virtual machine.",
},
"password": {
Type: schema.TypeString,
Computed: true,
Sensitive: true,
Description: "Default password for the guest OS login on this virtual machine.",
},
},
}
}
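// For context: a hedged sketch of how the resources and data sources reworked in this commit
// might be registered with the plugin SDK. ResourcesMap and DataSourcesMap are real fields of
// schema.Provider in hashicorp/terraform/helper/schema, but the "decort_*" type names and the
// Provider() wiring shown here are assumptions and are not part of this commit.
/*
func Provider() *schema.Provider {
return &schema.Provider{
ResourcesMap: map[string]*schema.Resource{
"decort_compute": resourceCompute(),
},
DataSourcesMap: map[string]*schema.Resource{
"decort_compute": dataSourceCompute(),
"decort_disk": dataSourceDisk(),
},
}
}
*/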

@ -0,0 +1,188 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
// a leveled logger with Debugf is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
"net/url"
// "strconv" // only needed by the commented-out legacy helpers below
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation"
)
/*
func (ctrl *ControllerCfg) utilityVmDisksProvision(mcfg *MachineConfig) error {
for index, disk := range mcfg.DataDisks {
url_values := &url.Values{}
// url_values.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
url_values.Add("accountId", fmt.Sprintf("%d", mcfg.TenantID))
url_values.Add("gid", fmt.Sprintf("%d", mcfg.GridID))
url_values.Add("name", fmt.Sprintf("%s", disk.Label))
url_values.Add("description", fmt.Sprintf("Data disk for VM ID %d / VM Name: %s", mcfg.ID, mcfg.Name))
url_values.Add("size", fmt.Sprintf("%d", disk.Size))
url_values.Add("type", "D")
// url_values.Add("iops", )
disk_id_resp, err := ctrl.decortAPICall("POST", DiskCreateAPI, url_values)
if err != nil {
// failed to create disk - partial resource update
return err
}
// disk created - API call returns disk ID as a string - use it to update
// disk ID in the corresponding MachineConfig.DiskConfig record
mcfg.DataDisks[index].ID, err = strconv.Atoi(disk_id_resp)
if err != nil {
// failed to convert disk ID into proper integer value - partial resource update
return err
}
// now that we have disk created and stored its ID in the mcfg.DataDisks[index].ID
// we can attempt attaching the disk to the VM
url_values = &url.Values{}
// url_values.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
url_values.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
url_values.Add("diskId", disk_id_resp)
_, err = ctrl.decortAPICall("POST", DiskAttachAPI, url_values)
if err != nil {
// failed to attach disk - partial resource update
return err
}
}
return nil
}
func (ctrl *ControllerCfg) utilityVmPortforwardsProvision(mcfg *MachineConfig) error {
for _, rule := range mcfg.PortForwards {
url_values := &url.Values{}
url_values.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
url_values.Add("cloudspaceId", fmt.Sprintf("%d", mcfg.ResGroupID))
url_values.Add("publicIp", mcfg.ExtIP) // this may be obsoleted by Resource group implementation
url_values.Add("publicPort", fmt.Sprintf("%d", rule.ExtPort))
url_values.Add("localPort", fmt.Sprintf("%d", rule.IntPort))
url_values.Add("protocol", rule.Proto)
_, err := ctrl.decortAPICall("POST", PortforwardingCreateAPI, url_values)
if err != nil {
// failed to create port forward rule - partial resource update
return err
}
}
return nil
}
func (ctrl *ControllerCfg) utilityVmNetworksProvision(mcfg *MachineConfig) error {
for _, net := range mcfg.Networks {
url_values := &url.Values{}
url_values.Add("machineId", fmt.Sprintf("%d", mcfg.ID))
url_values.Add("externalNetworkId", fmt.Sprintf("%d", net.NetworkID))
_, err := ctrl.decortAPICall("POST", AttachExternalNetworkAPI, url_values)
if err != nil {
// failed to attach network - partial resource update
return err
}
}
return nil
}
*/
func utilityComputeCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate Compute by one of the following approaches:
// - if compute_id is specified - locate by compute ID
// - if compute_name is specified - locate by a combination of compute name and resource
// group ID
//
// If succeeded, it returns non-empty string that contains JSON formatted facts about the
// Compute as returned by compute/get API call.
// Otherwise it returns empty string and meaningful error.
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for resource's Exists method.
//
compute_id, arg_set := d.GetOk("compute_id")
if arg_set {
// compute ID is specified, try to get compute instance straight by this ID
log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", compute_id.(int))
url_values.Add("computeId", fmt.Sprintf("%d", compute_id.(int)))
compute_facts, err := controller.decortAPICall("POST", ComputeGetAPI, url_values)
if err != nil {
return "", err
}
return compute_facts, nil
}
compute_name, arg_set := d.GetOk("name")
if !arg_set {
return "", fmt.Error("Cannot locate compute instance if name is empty and no compute ID specified.")
}
rg_id, arg_set := d.GetOk("rg_id")
if !arg_set {
return "", fmt.Error("Cannot locate compute by name %s if no resource group ID is set", compute_name.(string))
}
list_url_values := &url.Values{}
list_url_values.Add("rgId", fmt.Sprintf("%d",rg_id))
api_resp, err := controller.decortAPICall("POST", RgListComputesAPI, list_url_values)
if err != nil {
return "", err
}
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %q", api_resp)
comp_list := RgListComputesResp{}
err = json.Unmarshal([]byte(api_resp), &comp_list)
if err != nil {
return "", err
}
// log.Printf("%#v", comp_list)
log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(comp_list))
for index, item := range comp_list {
// need to match Compute by name, skip Computes with the same name in DESTROYED status
if item.Name == compute_name.(string) && item.Status != "DESTROYED" {
log.Debugf("utilityComputeCheckPresence: index %d, matched name %q", index, item.Name)
// we found the Compute we need - now get detailed information via compute/get API
get_url_values := &url.Values{}
get_url_values.Add("computeId", fmt.Sprintf("%d", item.ID))
api_resp, err = controller.decortAPICall("POST", ComputeGetAPI, get_url_values)
if err != nil {
return "", err
}
return api_resp, nil
}
}
return "", nil // there should be no error if Compute does not exist
// return "", fmt.Errorf("Cannot find Compute name %q in resource group ID %d", name, rgid)
}

@ -0,0 +1,207 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
// a leveled logger with Debugf is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation" // not referenced in this data source
)
func flattenDisk(d *schema.ResourceData, disk_facts string) error {
// This function expects disk_facts string to contain a response from disks/get API
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceDiskExists(...) method
model := DiskRecord{}
log.Debugf("flattenDisk: ready to unmarshal string %q", disk_facts)
err := json.Unmarshal([]byte(disk_facts), &model)
if err != nil {
return err
}
log.Debugf("flattenDisk: disk ID %d, disk AccountID %d", model.ID, model.AccountID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("disk_id", model.ID)
d.Set("name", model.Name)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)
d.Set("size", model.SizeMax)
// d.Set("sizeUsed", model.SizeUsed)
d.Set("type", model.Type)
d.Set("image_id", model.ImageID)
d.Set("sep_id", model.SepID)
d.Set("sep_type", model.SepType)
d.Set("pool", model.Pool)
d.Set("compute_id", model.ComputeID)
d.Set("description", model.Desc)
d.Set("status", model.Status)
d.Set("tech_status", model.TechStatus)
/* we do not manage snapshots via Terraform yet, so keep this commented out for a while
if len(model.Snapshots) > 0 {
log.Debugf("flattenDisk: calling flattenDiskSnapshots")
if err = d.Set("nics", flattenDiskSnapshots(model.Snapshots)); err != nil {
return err
}
}
*/
return nil
}
func dataSourceDiskRead(d *schema.ResourceData, m interface{}) error {
disk_facts, err := utilityDiskCheckPresence(d, m)
if disk_facts == "" {
// if empty string is returned from utilityDiskCheckPresence then there is no
// such Disk and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return err
}
return flattenDisk(d, disk_facts)
}
func dataSourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this disk. NOTE: disk names are NOT unique within an account.",
},
"disk_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the disk to get. If disk ID is specified, then name, account and account ID are ignored.",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account this disk belongs to.",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of the account this disk belongs to. If account ID is specified, account name is ignored.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this disk.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the image, which this disk was cloned from.",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of the disk in GB.",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
},
/*
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: snapshotSubresourceSchemaMake(),
},
Description: "List of user-created snapshots for this disk."
},
*/
"sep_id": {
Type: schema.TypeString,
Computed: true,
Description: "Storage end-point provider serving this disk.",
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of the storage end-point provider serving this disk.",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool where this disk is located.",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current model status of this disk.",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Current technical status of this disk.",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the compute instance where this disk is attached to, or 0 for unattached disk.",
},
}
return rets
}
func dataSourceDisk() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Read: dataSourceDiskRead,
Timeouts: &schema.ResourceTimeout {
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: dataSourceDiskSchemaMake(),
}
}

@ -0,0 +1,128 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
// a leveled logger with Debugf is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
"net/url"
// "strconv"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation"
)
func utilityDiskCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate Disk by one of the following algorithms depending on
// the parameters passed:
// - if disk ID is specified -> by disk ID
// - if disk name is specified -> by disk name and either account ID or account name
//
// NOTE: disk names are not unique, so the first occurrence of this name in the account will
// be returned. There is no such ambiguity when locating disk by its ID.
//
// If succeeded, it returns non empty string that contains JSON formatted facts about the disk
// as returned by disks/get API call.
// Otherwise it returns empty string and meaningful error.
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for resource's Exists method.
//
controller := m.(*ControllerCfg)
url_values := &url.Values{}
disk_id, arg_set := d.GetOk("disk_id")
if arg_set {
// go straight for the disk by its ID
log.Debugf("utilityDiskCheckPresence: locating disk by its ID %d", disk_id.(int))
url_values.Add("diskId", fmt.Sprintf("%d", disk_id.(int)))
disk_facts, err := controller.decortAPICall("POST", DisksGetAPI, url_values)
if err != nil {
return "", err
}
return disk_facts, nil
}
disk_name, arg_set := d.GetOk("name")
if !arg_set {
// no disk ID and no disk name - we cannot locate disk in this case
return "", fmt.Error("Cannot locate disk if name is empty and no disk ID specified.")
}
account_id, acc_id_set := d.GetOk("account_id")
if !acc_id_set {
account_name, arg_set := d.GetOkd("account_name")
if !arg_set {
return "", fmt.Error("Cannot locate disk by name %s if neither account ID nor account name are set", disk_name.(string))
}
}
url_values.Add("accountId", fmt.Sprintf("%d", account_id.(int)))
disk_facts, err := controller.decortAPICall("POST", DisksListAPI, url_values)
if err != nil {
return "", err
}
log.Debugf("utilityDiskCheckPresence: ready to unmarshal string %q", disk_facts)
disks_list := []DiskRecord{}
err = json.Unmarshal([]byte(disk_facts), &disks_list)
if err != nil {
return "", err
}
// log.Printf("%#v", vm_list)
log.Debugf("utilityDiskCheckPresence: traversing decoded JSON of length %d", len(disks_list))
for index, item := range disks_list {
// need to match disk by name, return the first match
if item.Name == disk_name.(string) && item.Status != "DESTROYED" {
log.Debugf("utilityDiskCheckPresence: index %d, matched disk name %q", index, item.Name)
// we found the disk we need - now get detailed information via API call to disks/get
/*
// TODO: this may not be optimal as it initiates one extra call to the DECORT controller
// in spite of the fact that we already have all required information about the disk in
// item variable
//
get_url_values := &url.Values{}
get_url_values.Add("diskId", fmt.Sprintf("%d", item.ID))
disk_facts, err = controller.decortAPICall("POST", DisksGetAPI, get_url_values)
if err != nil {
return "", err
}
return disk_facts, nil
*/
reencoded_item, err := json.Marshal(item)
if err != nil {
return "", err
}
return string(reencoded_item), nil
}
}
return "", nil // there should be no error if disk does not exist
}

@ -0,0 +1,111 @@
/*
Copyright (c) 2019-2020 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"log"
"net/url"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
func dataSourceImageRead(d *schema.ResourceData, m interface{}) error {
name := d.Get("name").(string)
rgid, rgid_set := d.GetOk("rgid")
tenant_id, tenant_set := d.GetOk("tenant_id")
controller := m.(*ControllerCfg)
url_values := &url.Values{}
if tenant_set {
url_values.Add("accountId", fmt.Sprintf("%d",tenant_id.(int)))
}
if rgid_set {
url_values.Add("cloudspaceId", fmt.Sprintf("%d",rgid.(int)))
}
body_string, err := controller.decortAPICall("POST", ImagesListAPI, url_values)
if err != nil {
return err
}
log.Printf("dataSourceImageRead: ready to decode response body")
model := ImagesListResp{}
err = json.Unmarshal([]byte(body_string), &model)
if err != nil {
return err
}
log.Printf("%#v", model)
log.Printf("dataSourceImageRead: traversing decoded JSON of length %d", len(model))
for index, item := range model {
// need to match VM by name
if item.Name == name {
log.Printf("dataSourceImageRead: index %d, matched name %q", index, item.Name)
d.SetId(fmt.Sprintf("%d", model[index].ID))
// d.Set("field_name", value)
return nil
}
}
return fmt.Errorf("Cannot find OS Image name %q", name)
}
func dataSourceImage() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Read: dataSourceImageRead,
Timeouts: &schema.ResourceTimeout {
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of the OS image to locate. This parameter is case sensitive.",
},
"tenant_id": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the tenant to limit image search to.",
},
"rgid": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the resource group to limit image search to.",
},
},
}
}

@ -175,6 +175,24 @@ type ResgroupUpdateParam struct {
//
const ResgroupDeleteAPI = "/restmachine/cloudapi/rg/delete"
//
// structures related to /cloudapi/rg/listComputes API
//
type ComputeBriefRecord struct { // this is a brief compute specification as returned by API rg/listComputes
// we do not even include here all fields as returned by this API, but only the most important that
// are really necessary to identify and distinguish computes
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
Name string `json:"name"`
ID uint `json:"id"`
RgID int `json:"rgId"`
RgName string `json:"rgName"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
}
const RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
type RgListComputesResp []ComputeBriefRecord
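// Example (illustrative only): decoding an rg/listComputes response into RgListComputesResp, as
// utilityComputeCheckPresence does; the sample payload below is hypothetical.
/*
sample := `[{"id": 100, "name": "test-vm", "rgId": 5, "status": "ENABLED", "techStatus": "STARTED"}]`
comp_list := RgListComputesResp{}
if err := json.Unmarshal([]byte(sample), &comp_list); err != nil {
// handle decoding error
}
fmt.Printf("found %d computes, first name %q\n", len(comp_list), comp_list[0].Name)
*/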
//
// structures related to /cloudapi/kvmXXX/create APIs
//
@ -198,11 +216,6 @@ type KvmVmCreateParam struct { // this is unified structure for both x86 and PPC
// structures related to cloudapi/compute/delete API
const ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
type ComputeDeleteParam struct {
ComputeID int `json:"computeId"`
Permanently bool `json:"permanently"`
}
//
// structures related to /cloudapi/compute/list API
//
@ -270,9 +283,6 @@ type ComputeRecord struct {
}
const ComputeListAPI = "/restmachine/cloudapi/compute/list"
type ComputeListParam struct {
IncludeDeleted bool `json:"includedeleted"`
}
type ComputeListResp []ComputeRecord
//

@ -67,7 +67,7 @@ func makeQuotaRecord(arg_list []interface{}) (QuotaRecord, int) {
return quota, 1
}
func parseQuota(quota QuotaRecord) []interface{} {
quota_map := make(map[string]interface{})
quota_map["cpu"] = quota.Cpu
@ -80,7 +80,7 @@ func flattenQuota(quota QuotaRecord) []interface{} {
result := make([]interface{}, 1)
result[0] = quota_map
return result // this result will be used to d.Set("quota",) item of dataSourceResgroup schema
}
func quotaRgSubresourceSchema() map[string]*schema.Schema {

@ -0,0 +1,175 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
// a leveled logger with Debugf is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
// "net/url"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation"
)
func flattenResgroup(d *schema.ResourceData, rg_facts string) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceRsgroupExists(...) method
log.Debugf("%s", rg_facts)
log.Debugf("flattenResgroup: ready to decode response body from %q", CloudspacesGetAPI)
details := ResgroupGetResp{}
err := json.Unmarshal([]byte(rg_facts), &details)
if err != nil {
return err
}
log.Debugf("flattenResgroup: decoded ResGroup name %q / ID %d, account ID %d, public IP %q",
details.Name, details.ID, details.AccountID, details.PublicIP)
d.SetId(fmt.Sprintf("%d", details.ID))
d.Set("name", details.Name)
d.Set("account_id", details.AccountID)
d.Set("grid_id", details.GridID)
d.Set("desc", details.Description)
d.Set("status", details.Status)
d.Set("def_net", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
d.Set("vins", details.Vins)
d.Set("computes", details.Computes)
log.Debugf("flattenResgroup: calling flattenQuota()")
if err = d.Set("quotas", parseQuota(details.Quotas)); err != nil {
return err
}
return nil
}
func dataSourceResgroupRead(d *schema.ResourceData, m interface{}) error {
rg_facts, err := utilityResgroupCheckPresence(d, m)
if rg_facts == "" {
// if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty in this case
return err
}
return flattenResgroup(d, rg_facts)
}
func dataSourceResgroup() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Read: dataSourceResgroupRead,
Timeouts: &schema.ResourceTimeout {
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of this resource group. Names are case sensitive and unique within the context of an account.",
},
"account": &schema.Schema {
Type: schema.TypeString,
Required: true,
Description: "Name of the account, which this resource group belongs to.",
},
"account_id": &schema.Schema {
Type: schema.TypeInt,
Computed: true,
Description: "Unique ID of the account, which this resource group belongs to.",
},
"desc": &schema.Schema {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this resource group.",
},
"grid_id": &schema.Schema {
Type: schema.TypeInt,
Computed: true,
Description: "Unique ID of the grid, where this resource group is deployed.",
},
"quotas": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource {
Schema: quotaRgSubresourceSchema(), // this is a dictionary
},
Description: "Quotas on the resources for this resource group.",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"def_net": &schema.Schema {
Type: schema.TypeString,
Computed: true,
Description: "Type of the default network for this resource group.",
},
"def_net_id": &schema.Schema {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the default network for this resource group (if any).",
},
"vins": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
MaxItems: LimitMaxVinsPerResgroup,
Elem: &schema.Schema {
Type: schema.TypeInt,
},
Description: "List of VINs deployed in this resource group.",
},
"computes": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema {
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
},
}
}

@ -0,0 +1,401 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"fmt"
// a leveled logger with Debugf is assumed here (e.g. logrus)
log "github.com/sirupsen/logrus"
"net/url"
"strconv"
"strings"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceResgroupCreate(d *schema.ResourceData, m interface{}) error {
// First validate that we have all parameters required to create the new Resource Group
arg_set := false
account_name, arg_set := d.GetOk("account")
if !arg_set {
return fmt.Errorf("Cannot create new RG: missing account.")
}
rg_name, arg_set := d.GetOk("name")
if !arg_set {
return fmt.Errorf("Cannot create new RG: missing name.")
}
grid_id, arg_set := d.GetOk("grid_id")
if !arg_set {
return fmt.Errorf("Cannot create new RG %q for account %q: missing Grid ID.",
rg_name.(string), account_name.(string))
}
// all required parameters are set in the schema - we can continue with RG creation
log.Debugf("resourceResgroupCreate: called for RG name %q, account name %q",
account_name.(string), rg_name.(string))
// Valid account ID is required to create new resource group
// obtain Account ID by account name - it should not be zero on success
validated_account_id, err := utilityGetAccountIdByName(account_name.(string), m)
if err != nil {
return err
}
// quota settings are optional
set_quota := false
var quota_record QuotaRecord
arg_value, arg_set = d.GetOk("quota")
if arg_set {
log.Debugf("resourceResgroupCreate: setting Quota on RG requested")
quota_record, _ = makeQuotaRecord(arg_value.([]interface{}))
set_quota = true
}
controller := m.(*ControllerCfg)
log.Debugf("resourceResgroupCreate: called by user %q for RG name %q, account %q / ID %d, Grid ID %d",
controller.getdecortUsername(),
rg_name.(string), account_name.(string), validated_account_id, grid_id.(int))
/*
type ResgroupCreateParam struct {
AccountID int `json:"accountId"`
GridId int `json:"gid"`
Name string `json:"name"`
Ram int `json:"maxMemoryCapacity"`
Disk int `json:"maxVDiskCapacity"`
Cpu int `json:"maxCPUCapacity"`
NetTraffic int `json:"maxNetworkPeerTransfer"`
ExtIPs int `json:"maxNumPublicIP"`
Owner string `json:"owner"`
DefNet string `json:"def_net"`
IPCidr string `json:"ipcidr"`
Desc string `json:"decs"`
Reason string `json:"reason"`
ExtNetID int `json:"extNetId"`
ExtIP string `json:"extIp"`
}
*/
url_values := &url.Values{}
url_values.Add("accountId", fmt.Sprintf("%d", validated_account_id))
url_values.Add("name", rg_name.(string))
url_values.Add("gid", fmt.Sprintf("%d", grid_id.(int)))
url_values.Add("owner", controller.getdecortUsername())
// pass quota values as set
if set_quota {
url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quota_record.Cpu))
url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quota_record.Disk))
url_values.Add("maxMemoryCapacity", fmt.Sprintf("%d", quota_record.Ram))
url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quota_record.ExtTraffic))
url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quota_record.ExtIPs))
// url_values.Add("???", fmt.Sprintf("%d", quota_record.GpuUnits))
}
// parse and handle network settings
def_net_type, arg_set = d.GetOk("def_net_type")
if arg_set {
ulr_values.Add("def_net", def_net_type.(string))
}
ipcidr, arg_set = d.GetOk("ipcidr")
if arg_set {
ulr_values.Add("ipcidr", ipcidr.(string))
}
ext_net_id, arg_set = d.GetOk("ext_net_id")
if arg_set {
ulr_values.Add("extNetId", ext_net_id.(int))
}
ext_ip, arg_set = d.GetOk("ext_ip")
if arg_set {
ulr_values.Add("extIp", ext_ip.(string))
}
api_resp, err := controller.decortAPICall("POST", ResgroupCreateAPI, url_values)
if err != nil {
return err
}
d.SetId(api_resp) // rg/create API returns ID of the newly created resource group on success
// sanity check: the response must be a numeric RG ID
if _, err := strconv.Atoi(api_resp); err != nil {
return fmt.Errorf("resourceResgroupCreate: unexpected non-numeric response %q from rg/create: %s", api_resp, err)
}
// re-read newly created RG to make sure schema contains complete and up to date set of specifications
return resourceResgroupRead(d, m)
}
func resourceResgroupRead(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupRead: called for RG name %q, account name %q",
d.Get("name").(string), d.Get("account").(string))
rg_facts, err := utilityResgroupCheckPresence(d, m)
if rg_facts == "" {
// if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return err
}
return flattenResgroup(d, rg_facts)
}
func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceResgroupUpdate: called for RG name %q, account name %q",
d.Get("name").(string), d.Get("account").(string))
do_update := false
controller := m.(*ControllerCfg)
url_values := &url.Values{}
url_values.Add("rgId", d.Id())
name_new, name_set := d.GetOk("name")
if name_set {
log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.")
name_old, _ := d.GetChange("name")
if name_old.(string) != name_new.(string) {
do_update = true
url_values.Add("name", name_new.(string))
}
}
quota_value, quota_set := d.GetOk("quota")
if quota_set {
log.Debugf("resourceResgroupUpdate: quota specified - looking for deltas from the old quota.")
quotarecord_new, _ := makeQuotaRecord(quota_value.([]interface{}))
quota_value_old, _ = d.GetChange("quota") // returns old as 1st, new as 2nd return value
quotarecord_old, _ := makeQuotaRecord(quota_value_old.([]interface{}))
if quotarecord_new.Cpu != quotarecord_old.Cpu {
do_update = true
log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu)
url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quotarecord_new.Cpu))
}
if quotarecord_new.Disk != quotarecord_old.Disk {
do_update = true
log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk)
url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quotarecord_new.Disk))
}
if quotarecord_new.Ram != quotarecord_old.Ram {
do_update = true
log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram)
url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecord_new.Ram))
}
if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic {
do_update = true
log.Debugf("resourceResgroupUpdate: NetTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic)
url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quotarecord_new.NetTraffic))
}
if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs {
do_update = true
log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs)
url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quotarecord_new.ExtIPs))
}
}
desc_new, desc_set := d.GetOk("desc")
if desc_set {
log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.")
desc_old, _ := d.GetChange("desc")
if desc_old.(string) != desc_new.(string) {
do_update = true
url_values.Add("desc", desc_new.(string))
}
}
if do_update {
log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG")
_, err := controller.decortAPICall("POST", ResgroupUpdateAPI, url_values)
if err != nil {
return err
}
} else {
log.Debugf("resourceResgroupUpdate: no difference between old and new state - no update on the RG will be done")
}
return resourceResgroupRead(d, m)
}
func resourceResgroupDelete(d *schema.ResourceData, m interface{}) error {
// NOTE: this method forcibly destroys target resource group with flag "permanently", so there is no way to
// restore the destroyed resource group as well as all Computes & VINSes that existed in it
log.Debugf("resourceResgroupDelete: called for RG name %q, account name %q",
d.Get("name").(string), d.Get("account").(string))
rg_facts, err := utilityResgroupCheckPresence(d, m)
if rg_facts == "" {
// the target RG does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
url_values := &url.Values{}
url_values.Add("rgId", d.Id())
url_values.Add("force", "true")
url_values.Add("permanently", "true")
url_values.Add("reason", "Destroyed by DECORT Terraform provider")
controller := m.(*ControllerCfg)
_, err = controller.decortAPICall("POST", ResgroupDeleteAPI, url_values)
if err != nil {
return err
}
return nil
}
func resourceResgroupExists(d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should NOT modify ResourceData argument
rg_facts, err := utilityResgroupCheckPresence(d, m)
if rg_facts == "" {
if err != nil {
return false, err
}
return false, nil
}
return true, nil
}
func resourceResgroup() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Create: resourceResgroupCreate,
Read: resourceResgroupRead,
Update: resourceResgroupUpdate,
Delete: resourceResgroupDelete,
Exists: resourceResgroupExists,
Timeouts: &schema.ResourceTimeout {
Create: &Timeout180s,
Read: &Timeout30s,
Update: &Timeout180s,
Delete: &Timeout60s,
Default: &Timeout60s,
},
Schema: map[string]*schema.Schema {
"name": &schema.Schema {
Type: schema.TypeString,
Required: true,
Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.",
},
"account": &schema.Schema {
Type: schema.TypeString,
Required: true,
Description: "Name of the account, which this resource group belongs to.",
},
"def_net": &schema.Schema {
Type: schema.TypeString,
Optional: true,
Default: "PRIVATE"
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
"ipcidr": &schema.Schema {
Type: schema.TypeString,
Optional: true,
Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net=PRIVATE",
},
"ext_net_id": &schema.Schema {
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "ID of the external network, which this resource group will use as default for its computes if def_net=PUBLIC",
},
"ext_ip": &schema.Schema {
Type: schema.TypeString,
Optional: true,
Description: "IP address on the external netowrk to request, if def_net=PUBLIC",
},
"account_id": &schema.Schema {
Type: schema.TypeInt,
Computed: true,
Description: "Unique ID of the account, which this resource group belongs to.",
},
"grid_id": &schema.Schema {
Type: schema.TypeInt,
Required: true,
Description: "Unique ID of the grid, where this resource group is deployed.",
},
"quota": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource {
Schema: quotasSubresourceSchema(),
},
Description: "Quota settings for this resource group.",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "User-defined text description of this resource group.",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"def_net_id": &schema.Schema {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the default network for this resource group (if any).",
},
"vins": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
MaxItems: LimitMaxVinsPerResgroup,
Elem: &schema.Schema {
Type: schema.TypeInt,
},
Description: "List of VINs deployed in this resource group.",
},
"computes": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema {
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
},
}
}
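/*
Example usage sketch for this resource. The resource type name and the quota attribute
names below are assumptions made for illustration only (the quota keys are defined by
quotasSubresourceSchema(), which lives in another file); consult the provider
documentation for the authoritative names.

    resource "decort_resgroup" "example" {
      name    = "my_rg"
      account = "my_account"
      grid_id = 1
      def_net = "PRIVATE"
      desc    = "Resource group managed by the DECORT Terraform provider"

      quota {
        cpu  = 8
        ram  = 8192
        disk = 100
      }
    }
*/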

@ -0,0 +1,154 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"log"
"net/url"
// "strconv"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation"
)
func (ctrl *ControllerCfg) utilityResgroupConfigGet(rgid int) (*ResgroupGetResp, error) {
url_values := &url.Values{}
url_values.Add("rgId", fmt.Sprintf("%d", rgid))
resgroup_facts, err := ctrl.decortAPICall("POST", ResgroupGetAPI, url_values)
if err != nil {
return nil, err
}
log.Debugf("utilityResgroupConfigGet: ready to unmarshal string %q", resgroup_facts)
model := &ResgroupGetResp{}
err = json.Unmarshal([]byte(resgroup_facts), model)
if err != nil {
return nil, err
}
/*
ret := &ResgroupConfig{}
ret.AccountID = model.AccountID
ret.Location = model.Location
ret.Name = model.Name
ret.ID = rgid
ret.GridID = model.GridID
ret.ExtIP = model.ExtIP // legacy field for VDC - this will eventually become obsoleted by true Resource Groups
// Quota ResgroupQuotaConfig
// Network NetworkConfig
*/
log.Debugf("utilityResgroupConfigGet: account ID %d, GridID %d, Name %s",
model.AccountID, model.GridID, model.Name)
return model, nil
}
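/*
Usage sketch, illustrative only: fetching RG facts by numeric ID through the controller
and reading a few fields of the decoded model. The RG ID value is an assumption.

    model, err := controller.utilityResgroupConfigGet(1234)
    if err != nil {
        return err
    }
    log.Debugf("RG %q: account ID %d, grid ID %d", model.Name, model.AccountID, model.GridID)
*/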
// On success this function returns a string, as returned by API rg/get, which could be unmarshalled
// into ResgroupGetResp structure
func utilityResgroupCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate resource group by its name and account name.
// If succeeded, it returns a non-empty string that contains JSON-formatted facts about the
// resource group as returned by the rg/get API call.
// Otherwise it returns an empty string and a meaningful error.
//
// NOTE: As our provider always deletes RGs permanently, there is no "restore" method and
// consequently we are not interested in matching RGs in DELETED state. Hence, we call
// .../rg/list API with includedeleted=false
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for the Terraform resource Exists method.
//
name := d.Get("name").(string)
account_name := d.Get("account").(string)
controller := m.(*ControllerCfg)
url_values := &url.Values{}
url_values.Add("includedeleted", "false")
body_string, err := controller.decortAPICall("POST", ResgroupListAPI, url_values)
if err != nil {
return "", err
}
log.Debugf("%s", body_string)
log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %q", ResgroupListAPI)
model := ResgroupListResp{}
err = json.Unmarshal([]byte(body_string), &model)
if err != nil {
return "", err
}
log.Debugf("utilityResgroupCheckPresence: traversing decoded Json of length %d", len(model))
for index, item := range model {
// need to match RG by name & account name
if item.Name == name && item.AccountName == account_name {
log.Debugf("utilityResgroupCheckPresence: match RG name %q / ID %d, account %q at index %d",
item.Name, item.ID, item.AccountName, index)
// not all required information is returned by rg/list API, so we need to initiate one more
// call to rg/get to obtain extra data to complete Resource population.
// Namely, we need to extract resource quota settings
req_values := &url.Values{}
req_values.Add("rgId", fmt.Sprintf("%d", item.ID))
body_string, err := controller.decortAPICall("POST", ResgroupGetAPI, req_values)
if err != nil {
return "", err
}
return body_string, nil
}
}
return "", fmt.Errorf("Cannot find RG name %q owned by account %q", name, account_name)
}
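/*
Consumption sketch, illustrative only: as noted above, the string returned by this function
can be unmarshalled into ResgroupGetResp, e.g. from a Read or Update handler.

    rg_facts, err := utilityResgroupCheckPresence(d, m)
    if err != nil {
        return err
    }
    model := ResgroupGetResp{}
    if err = json.Unmarshal([]byte(rg_facts), &model); err != nil {
        return err
    }
    log.Debugf("found RG %q in account ID %d", model.Name, model.AccountID)
*/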
func utilityGetAccountIdByName(account_name string, m interface{}) (int, error) {
controller := m.(*ControllerCfg)
url_values := &url.Values{}
body_string, err := controller.decortAPICall("POST", AccountsListAPI, url_values)
if err != nil {
return 0, err
}
model := AccountsListResp{}
err = json.Unmarshal([]byte(body_string), &model)
if err != nil {
return 0, err
}
log.Debugf("utilityGetAccountIdByName: traversing decoded Json of length %d", len(model))
for index, item := range model {
// need to match Account by name
if item.Name == account_name {
log.Debugf("utilityGetAccountIdByName: match Account name %q / ID %d at index %d",
item.Name, item.ID, index)
return item.ID, nil
}
}
return 0, fmt.Errorf("Cannot find account %q for the current user. Check account name and your access rights", account_name)
}
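/*
Usage sketch, illustrative only: resolving the account name kept in the resource data to
its numeric ID, e.g. before composing an account-scoped API request elsewhere in the provider.

    account_id, err := utilityGetAccountIdByName(d.Get("account").(string), m)
    if err != nil {
        return err
    }
    log.Debugf("account %q resolved to ID %d", d.Get("account").(string), account_id)
*/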