Add files

This commit is contained in:
stSolo
2022-07-20 17:14:00 +03:00
parent 2b7f3d45f3
commit 28ceebecf8
125 changed files with 15225 additions and 735 deletions

View File

@@ -0,0 +1,46 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
const KvmX86CreateAPI = "/restmachine/cloudbroker/kvmx86/create"
const KvmPPCCreateAPI = "/restmachine/cloudbroker/kvmppc/create"
const ComputeGetAPI = "/restmachine/cloudbroker/compute/get"
const RgListComputesAPI = "/restmachine/cloudbroker/rg/listComputes"
const ComputeNetAttachAPI = "/restmachine/cloudbroker/compute/netAttach"
const ComputeNetDetachAPI = "/restmachine/cloudbroker/compute/netDetach"
const ComputeDiskAttachAPI = "/restmachine/cloudbroker/compute/diskAttach"
const ComputeDiskDetachAPI = "/restmachine/cloudbroker/compute/diskDetach"
const ComputeStartAPI = "/restmachine/cloudbroker/compute/start"
const ComputeStopAPI = "/restmachine/cloudbroker/compute/stop"
const ComputeResizeAPI = "/restmachine/cloudbroker/compute/resize"
const DisksResizeAPI = "/restmachine/cloudbroker/disks/resize2"
const ComputeDeleteAPI = "/restmachine/cloudbroker/compute/delete"
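// Illustrative sketch (not part of this file): each endpoint constant above is passed to the
// controller's DecortAPICall helper together with url.Values, as elsewhere in this package;
// "c" is assumed to be a *controller.ControllerCfg obtained from the provider meta argument.
//
//	params := &url.Values{}
//	params.Add("computeId", "1234") // hypothetical compute ID
//	resp, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, params)
//	if err != nil {
//		// handle the API error
//	}
//	// resp is a JSON string with detailed facts about the compute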

View File

@@ -0,0 +1,385 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
import (
"context"
"encoding/json"
"errors"
"fmt"
// "net/url"
"github.com/rudecs/terraform-provider-decort/internal/constants"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
// Parse the list of all disks from the API compute/get response into a list of "extra disks" attached to this compute.
// Extra disks are all compute disks except the boot disk.
func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
// this return value will be used in d.Set("extra_disks", ...) of the dataSourceCompute schema,
// which is a simple list of integer disk IDs excluding the boot disk ID
length := len(disks)
log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)
if length == 0 || (length == 1 && disks[0].Type == "B") {
// the disk list is empty (which is kind of strange - diskless compute?), or
// there is only one disk in the list and it is a boot disk;
// as we skip boot disks, the result will be of 0 length anyway
return make([]interface{}, 0)
}
// build the result with append so the function stays correct even if no boot disk is present
result := make([]interface{}, 0, length)
for _, value := range disks {
if value.Type == "B" {
// skip boot disk when iterating over the list of disks
continue
}
result = append(result, value.ID)
}
return result
}
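// A minimal worked example of the transformation above (hypothetical disk IDs):
//
//	disks := []DiskRecord{
//		{ID: 10, Type: "B"}, // boot disk - filtered out
//		{ID: 21, Type: "D"},
//		{ID: 22, Type: "D"},
//	}
//	extra := parseComputeDisksToExtraDisks(disks) // -> []interface{}{uint(21), uint(22)}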
func parseBootDiskSize(disks []DiskRecord) int {
// this return value will be used in d.Set("boot_disk_size", ...) of the dataSourceCompute schema
if len(disks) == 0 {
return 0
}
for _, value := range disks {
if value.Type == "B" {
return value.SizeMax
}
}
return 0
}
func parseBootDiskId(disks []DiskRecord) uint {
// this return value will be used in d.Set("boot_disk_id", ...) of the dataSourceCompute schema
if len(disks) == 0 {
return 0
}
for _, value := range disks {
if value.Type == "B" {
return value.ID
}
}
return 0
}
func findBootDisk(disks []DiskRecord) (*DiskRecord, error) {
for _, d := range disks {
if d.Type == "B" {
return &d, nil
}
}
return nil, errors.New("boot disk not found")
}
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
// this return value will be used in d.Set("network", ...) of the dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := []interface{}{}
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by networkSubresourceSchemaMake()
elem["net_id"] = value.NetID
elem["net_type"] = value.NetType
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
// log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
result = append(result, elem)
}
return result
}
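// For illustration (hypothetical values), a single record such as
//
//	InterfaceRecord{NetID: 5, NetType: "VINS", IPAddress: "192.168.1.3", MAC: "52:54:00:00:00:01"}
//
// is flattened into
//
//	map[string]interface{}{"net_id": 5, "net_type": "VINS", "ip_address": "192.168.1.3", "mac": "52:54:00:00:00:01"}
//
// matching the keys defined by networkSubresourceSchemaMake().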
func flattenCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
model := ComputeGetResp{}
log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts)
err := json.Unmarshal([]byte(compFacts), &model)
if err != nil {
return err
}
log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)
d.SetId(fmt.Sprintf("%d", model.ID))
// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't
d.Set("name", model.Name)
d.Set("rg_id", model.RgID)
d.Set("rg_name", model.RgName)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)
d.Set("driver", model.Driver)
d.Set("cpu", model.Cpu)
d.Set("ram", model.Ram)
// d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way
d.Set("image_id", model.ImageID)
d.Set("description", model.Desc)
d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress fucntion
// d.Set("status", model.Status)
// d.Set("tech_status", model.TechStatus)
d.Set("started", model.TechStatus == "STARTED")
bootDisk, err := findBootDisk(model.Disks)
if err != nil {
return err
}
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool)
if len(model.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
return err
}
}
if len(model.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
return err
}
}
if len(model.OsUsers) > 0 {
log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers))
if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil {
return err
}
}
return nil
}
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == "" {
// an empty string returned from utilityComputeCheckPresence means there is no
// such Compute, and err explains why - just return it to the calling party
d.SetId("") // ensure ID is empty
return diag.FromErr(err)
}
if err = flattenCompute(d, compFacts); err != nil {
return diag.FromErr(err)
}
return nil
}
func DataSourceCompute() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceComputeRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
},
// TODO: consider removing compute_id from the schema, as it is not practical to call this data provider
// when the corresponding compute ID is already known
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored.",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the resource group where this compute instance is located.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource group where this compute instance is located.",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account this compute instance belongs to.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account this compute instance belongs to.",
},
"driver": {
Type: schema.TypeString,
Computed: true,
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of CPUs allocated for this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Computed: true,
Description: "Amount of RAM in MB allocated for this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the OS image this compute instance is based on.",
},
"image_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the OS image this compute instance is based on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk size in GB.",
},
"boot_disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk ID.",
},
"extra_disks": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "IDs of the extra disk(s) attached to this compute.",
},
/*
"disks": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
},
Description: "Detailed specification for all disks attached to this compute instance (including bood disk).",
},
*/
"network": {
Type: schema.TypeSet,
Optional: true,
MaxItems: constants.MaxNetworksPerCompute,
Elem: &schema.Resource{
Schema: networkSubresourceSchemaMake(),
},
Description: "Network connection(s) for this compute.",
},
"os_users": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: osUsersSubresourceSchemaMake(),
},
Description: "Guest OS users provisioned on this compute instance.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Computed: true,
Description: "Placeholder for cloud_init parameters.",
},
"started": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Is compute started.",
},
},
}
}
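// A sketch of how this data source is typically wired into the provider; the
// "decort_kvmvm" type name is an assumption for illustration - the actual name
// is defined in the provider's DataSourcesMap:
//
//	provider := &schema.Provider{
//		DataSourcesMap: map[string]*schema.Resource{
//			"decort_kvmvm": kvmvm.DataSourceCompute(),
//		},
//	}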

View File

@@ -0,0 +1,190 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
type DiskRecord struct {
Acl map[string]interface{} `json:"acl"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
BootPartition int `json:"bootPartition"`
CreatedTime uint64 `json:"creationTime"`
ComputeID int `json:"computeId"`
ComputeName string `json:"computeName"`
DeletedTime uint64 `json:"deletionTime"`
DeviceName string `json:"devicename"`
Desc string `json:"desc"`
DestructionTime uint64 `json:"destructionTime"`
DiskPath string `json:"diskPath"`
GridID int `json:"gid"`
GUID int `json:"guid"`
ID uint `json:"id"`
ImageID int `json:"imageId"`
Images []int `json:"images"`
IOTune map[string]interface{} `json:"iotune"`
IQN string `json:"iqn"`
Login string `json:"login"`
Name string `json:"name"`
MachineId int `json:"machineId"`
MachineName string `json:"machineName"`
Milestones uint64 `json:"milestones"`
Order int `json:"order"`
Params string `json:"params"`
Passwd string `json:"passwd"`
ParentId int `json:"parentId"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
PurgeTime uint64 `json:"purgeTime"`
PurgeAttempts uint64 `json:"purgeAttempts"`
RealityDeviceNumber int `json:"realityDeviceNumber"`
ReferenceId string `json:"referenceId"`
ResID string `json:"resId"`
ResName string `json:"resName"`
Role string `json:"role"`
SepType string `json:"sepType"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output
SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
Snapshots []SnapshotRecord `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
Type string `json:"type"`
UpdateBy uint64 `json:"updateBy"`
VMID int `json:"vmid"`
}
type InterfaceRecord struct {
ConnID int `json:"connId"` // This is VLAN ID or VxLAN ID, depending on ConnType
ConnType string `json:"connType"` // Either "VLAN" or "VXLAN" tag
DefaultGW string `json:"defGw"`
Guid string `json:"guid"`
IPAddress string `json:"ipAddress"` // without trailing network mask, i.e. "192.168.1.3"
MAC string `json:"mac"`
Name string `json:"name"`
NetID int `json:"netId"` // This is either ExtNet ID or ViNS ID, depending on NetType
NetMask int `json:"netMask"`
NetType string `json:"netType"` // Either "EXTNET" or "VINS" tag
PciSlot int `json:"pciSlot"`
Target string `json:"target"`
Type string `json:"type"`
VNFs []int `json:"vnfs"`
QOS InterfaceQosRecord `json:"qos"`
}
type InterfaceQosRecord struct {
ERate int `json:"eRate"`
Guid string `json:"guid"`
InBurst int `json:"inBurst"`
InRate int `json:"inRate"`
}
type SnapshotRecord struct {
Guid string `json:"guid"`
Label string `json:"label"`
ResId string `json:"resId"`
SnapSetGuid string `json:"snapSetGuid"`
SnapSetTime uint64 `json:"snapSetTime"`
TimeStamp uint64 `json:"timestamp"`
}
type SnapshotRecordList []SnapshotRecord
type ComputeGetResp struct {
// ACLs `json:"ACL"` - it is a dictionary, special parsing required
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
Arch string `json:"arch"`
BootDiskSize int `json:"bootdiskSize"`
CloneReference int `json:"cloneReference"`
Clones []int `json:"clones"`
Cpu int `json:"cpus"`
Desc string `json:"desc"`
Disks []DiskRecord `json:"disks"`
Driver string `json:"driver"`
GridID int `json:"gid"`
ID uint `json:"id"`
ImageID int `json:"imageId"`
ImageName string `json:"imageName"`
Interfaces []InterfaceRecord `json:"interfaces"`
LockStatus string `json:"lockStatus"`
ManagerID int `json:"managerId"`
ManagerType string `json:"manageType"`
Name string `json:"name"`
NatableVinsID int `json:"natableVinsId"`
NatableVinsIP string `json:"natableVinsIp"`
NatableVinsName string `json:"natableVinsName"`
NatableVinsNet string `json:"natableVinsNetwork"`
NatableVinsNetName string `json:"natableVinsNetworkName"`
OsUsers []OsUserRecord `json:"osUsers"`
Ram int `json:"ram"`
RgID int `json:"rgId"`
RgName string `json:"rgName"`
SnapSets []SnapSetRecord `json:"snapSets"`
Status string `json:"status"`
// Tags []string `json:"tags"` // Tags were reworked since DECORT 3.7.1
TechStatus string `json:"techStatus"`
TotalDiskSize int `json:"totalDiskSize"`
UpdatedBy string `json:"updatedBy"`
UpdateTime uint64 `json:"updateTime"`
UserManaged bool `json:"userManaged"`
Vgpus []int `json:"vgpus"`
VinsConnected int `json:"vinsConnected"`
VirtualImageID int `json:"virtualImageId"`
}
type OsUserRecord struct {
Guid string `json:"guid"`
Login string `json:"login"`
Password string `json:"password"`
PubKey string `json:"pubkey"`
}
type SnapSetRecord struct {
Disks []int `json:"disks"`
Guid string `json:"guid"`
Label string `json:"label"`
TimeStamp uint64 `json:"timestamp"`
}
type ComputeBriefRecord struct { // this is a brief compute specification as returned by API rg/listComputes
// we do not include all fields returned by this API - only the most important ones,
// which are necessary to identify and distinguish computes
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
Name string `json:"name"`
ID uint `json:"id"`
RgID int `json:"rgId"`
RgName string `json:"rgName"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
}
type RgListComputesResp []ComputeBriefRecord

View File

@@ -0,0 +1,154 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
import (
"bytes"
"hash/fnv"
"github.com/rudecs/terraform-provider-decort/internal/statefuncs"
log "github.com/sirupsen/logrus"
"sort"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// This is a subresource of the compute resource, used when creating/managing compute network connections
func networkSubresIPAddrDiffSuppress(key, oldVal, newVal string, d *schema.ResourceData) bool {
if newVal != "" && newVal != oldVal {
log.Debugf("networkSubresIPAddrDiffSuppress: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false
}
log.Debugf("networkSubresIPAddrDiffSuppress: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
}
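// A worked example of the suppression logic above: with a stored (old) value of
// "192.168.1.3", a config that omits ip_address yields newVal == "" and the diff
// is suppressed; a config that sets ip_address = "192.168.1.10" yields a non-empty
// newVal different from oldVal, so the diff is reported and drives a network update.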
// This function is based on the original Terraform SerializeResourceForHash found
// in helper/schema/serialize.go
// It skips network subresource attributes that are irrelevant for the identification
// of unique network blocks
func networkSubresourceSerialize(output *bytes.Buffer, val interface{}, resource *schema.Resource) {
if val == nil {
return
}
rs := resource.Schema
m := val.(map[string]interface{})
keys := make([]string, 0, len(rs))
allComputed := true
for k, val := range rs {
if val.Optional || val.Required {
allComputed = false
}
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// explicitly ignore "ip_address" when hashing
if k == "ip_address" {
continue
}
subSchema := rs[k]
// Skip attributes that are not user-provided. Computed attributes
// do not contribute to the hash since their ultimate value cannot
// be known at plan/diff time.
if !allComputed && !(subSchema.Required || subSchema.Optional) {
continue
}
output.WriteString(k)
output.WriteRune(':')
value := m[k]
schema.SerializeValueForHash(output, value, subSchema)
}
}
// HashNetworkSubresource hashes the network subresource of the compute resource. It uses
// the specially designed networkSubresourceSerialize (see above) to make sure hashing
// does not involve attributes that we deem irrelevant to the uniqueness of network
// subresource definitions.
// It is this function that should be specified as the SchemaSetFunc when creating a Set
// from network subresources (e.g. in flattenCompute)
//
// This function is based on the original Terraform function HashResource from
// helper/schema/set.go
func HashNetworkSubresource(resource *schema.Resource) schema.SchemaSetFunc {
return func(v interface{}) int {
var serialized bytes.Buffer
networkSubresourceSerialize(&serialized, v, resource)
hs := fnv.New32a()
hs.Write(serialized.Bytes())
return int(hs.Sum32())
}
}
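// Usage sketch (assumed, per the comment above): building a Set of network blocks
// with this hash function when flattening compute facts into state; "model" stands
// for a ComputeGetResp decoded earlier:
//
//	netResource := &schema.Resource{Schema: networkSubresourceSchemaMake()}
//	netSet := schema.NewSet(HashNetworkSubresource(netResource),
//		parseComputeInterfacesToNetworks(model.Interfaces))
//	_ = netSet // then, e.g., d.Set("network", netSet)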
func networkSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"net_type": {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, false), // observe case while validating
Description: "Type of the network for this connection, either EXTNET or VINS.",
},
"net_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the network for this connection.",
},
"ip_address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
DiffSuppressFunc: networkSubresIPAddrDiffSuppress,
Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
},
"mac": {
Type: schema.TypeString,
Computed: true,
Description: "MAC address associated with this connection. MAC address is assigned automatically.",
},
}
return rets
}

View File

@@ -0,0 +1,86 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
import (
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func parseOsUsers(logins []OsUserRecord) []interface{} {
var result = make([]interface{}, len(logins))
for index, value := range logins {
elem := make(map[string]interface{})
elem["guid"] = value.Guid
elem["login"] = value.Login
elem["password"] = value.Password
elem["public_key"] = value.PubKey
result[index] = elem
log.Debugf("parseOsUsers: parsed element %d - login %q", index, value.Login)
}
return result
}
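// For illustration (hypothetical values), OsUserRecord{Guid: "g-1", Login: "cloud-user"}
// is flattened into
//
//	map[string]interface{}{"guid": "g-1", "login": "cloud-user", "password": "", "public_key": ""}
//
// matching the keys defined by osUsersSubresourceSchemaMake() below.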
func osUsersSubresourceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "GUID of this guest OS user.",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login name of this guest OS user.",
},
"password": {
Type: schema.TypeString,
Computed: true,
Sensitive: true,
Description: "Password of this guest OS user.",
},
"public_key": {
Type: schema.TypeString,
Computed: true,
Description: "SSH public key of this guest OS user.",
},
}
return rets
}

View File

@@ -0,0 +1,542 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller"
"github.com/rudecs/terraform-provider-decort/internal/statefuncs"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func cloudInitDiffSuppress(key, oldVal, newVal string, d *schema.ResourceData) bool {
if oldVal == "" && newVal != "applied" {
// if the old value of "cloud_init" is an empty string, we are creating a new compute
// and there is a chance that the user wants custom cloud-init parameters - so we check whether
// cloud_init is explicitly set in the TF file by making sure that its new value differs from "applied",
// which is a reserved keyword.
log.Debugf("cloudInitDiffSuppress: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false // there is a difference between stored and new value
}
log.Debugf("cloudInitDiffSuppress: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
}
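// Worked example of the suppression logic above: on first create the stored (old) value is ""
// and a user-supplied cloud-init config (anything other than the reserved keyword "applied")
// makes the first condition true, so the diff is NOT suppressed and the userdata is sent to
// the API by resourceComputeCreate. On subsequent plans the stored value is non-empty, the
// condition fails, and the diff is suppressed - changes to cloud_init never force an update
// of an existing compute.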
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
// we assume all mandatory parameters it takes to create a compute instance are properly
// specified - we rely on schema "Required" attributes to let Terraform validate them for us
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
// create basic Compute (i.e. without extra disks and network connections - those will be attached
// by subsequent individual API calls).
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("rgId", fmt.Sprintf("%d", d.Get("rg_id").(int)))
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("cpu", fmt.Sprintf("%d", d.Get("cpu").(int)))
urlValues.Add("ram", fmt.Sprintf("%d", d.Get("ram").(int)))
urlValues.Add("imageId", fmt.Sprintf("%d", d.Get("image_id").(int)))
urlValues.Add("bootDisk", fmt.Sprintf("%d", d.Get("boot_disk_size").(int)))
urlValues.Add("netType", "NONE") // at the 1st step create isolated compute
urlValues.Add("start", "0") // at the 1st step create compute in a stopped state
argVal, argSet := d.GetOk("description")
if argSet {
urlValues.Add("desc", argVal.(string))
}
if sepID, ok := d.GetOk("sep_id"); ok {
urlValues.Add("sepId", strconv.Itoa(sepID.(int)))
}
if pool, ok := d.GetOk("pool"); ok {
urlValues.Add("pool", pool.(string))
}
/*
sshKeysVal, sshKeysSet := d.GetOk("ssh_keys")
if sshKeysSet {
// process SSH Key settings and set API values accordingly
log.Debugf("resourceComputeCreate: calling makeSshKeysArgString to setup SSH keys for guest login(s)")
urlValues.Add("userdata", makeSshKeysArgString(sshKeysVal.([]interface{})))
}
*/
computeCreateAPI := KvmX86CreateAPI
driver := d.Get("driver").(string)
if driver == "KVM_PPC" {
computeCreateAPI = KvmPPCCreateAPI
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
} else { // note that we do not validate arch value for explicit "KVM_X86" here
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
}
argVal, argSet = d.GetOk("cloud_init")
if argSet {
// userdata must not be an empty string and must not be the reserved keyword "applied"
userdata := argVal.(string)
if userdata != "" && userdata != "applied" {
urlValues.Add("userdata", userdata)
}
}
apiResp, err := c.DecortAPICall(ctx, "POST", computeCreateAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
// Compute create API returns ID of the new Compute instance on success
d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially
compId, _ := strconv.Atoi(apiResp)
log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string))
// Configure data disks if any
argVal, argSet = d.GetOk("extra_disks")
if argSet && argVal.(*schema.Set).Len() > 0 {
// urlValues.Add("desc", argVal.(string))
log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
err = utilityComputeExtraDisksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", compId, err)
return diag.FromErr(err)
}
}
// Configure external networks if any
argVal, argSet = d.GetOk("network")
if argSet && argVal.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
err = utilityComputeNetworksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
return diag.FromErr(err)
}
}
// Nota bene: we created the compute in a STOPPED state (this is required to properly attach the 1st network interface);
// now we need to start it before we report the sequence complete
if d.Get("started").(bool) {
reqValues := &url.Values{}
reqValues.Add("computeId", fmt.Sprintf("%d", compId))
log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", compId)
if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, reqValues); err != nil {
return diag.FromErr(err)
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
// Compute read function will also update resource ID on success, so that Terraform
// will know the resource exists
return dataSourceComputeRead(ctx, d, m)
}
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceComputeRead: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == "" {
if err != nil {
return diag.FromErr(err)
}
// Compute with such name and RG ID was not found
return nil
}
if err = flattenCompute(d, compFacts); err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceComputeRead: after flattenCompute: Compute ID %s, name %q, RG ID %d",
d.Id(), d.Get("name").(string), d.Get("rg_id").(int))
return nil
}
func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceComputeUpdate: called for Compute ID %s / name %s, RGID %d",
d.Id(), d.Get("name").(string), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
/*
1. Resize CPU/RAM
2. Resize (grow) boot disk
3. Update extra disks
4. Update networks
5. Start/stop
*/
// 1. Resize CPU/RAM
params := &url.Values{}
doUpdate := false
params.Add("computeId", d.Id())
oldCpu, newCpu := d.GetChange("cpu")
if oldCpu.(int) != newCpu.(int) {
params.Add("cpu", fmt.Sprintf("%d", newCpu.(int)))
doUpdate = true
} else {
params.Add("cpu", "0") // no change to CPU allocation
}
oldRam, newRam := d.GetChange("ram")
if oldRam.(int) != newRam.(int) {
params.Add("ram", fmt.Sprintf("%d", newRam.(int)))
doUpdate = true
} else {
params.Add("ram", "0")
}
if doUpdate {
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
oldCpu.(int), newCpu.(int),
oldRam.(int), newRam.(int))
params.Add("force", "true")
_, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, params)
if err != nil {
return diag.FromErr(err)
}
}
// 2. Resize (grow) Boot disk
oldSize, newSize := d.GetChange("boot_disk_size")
if oldSize.(int) < newSize.(int) {
bdsParams := &url.Values{}
bdsParams.Add("diskId", fmt.Sprintf("%d", d.Get("boot_disk_id").(int)))
bdsParams.Add("size", fmt.Sprintf("%d", newSize.(int)))
log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))
_, err := c.DecortAPICall(ctx, "POST", DisksResizeAPI, bdsParams)
if err != nil {
return diag.FromErr(err)
}
} else if oldSize.(int) > newSize.(int) {
log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id())
}
// 3. Calculate and apply changes to data disks
err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
if err != nil {
return diag.FromErr(err)
}
// 4. Calculate and apply changes to network connections
err = utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
if err != nil {
return diag.FromErr(err)
}
if d.HasChange("started") {
params := &url.Values{}
params.Add("computeId", d.Id())
if d.Get("started").(bool) {
if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, params); err != nil {
return diag.FromErr(err)
}
} else {
if _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, params); err != nil {
return diag.FromErr(err)
}
}
}
// we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
return dataSourceComputeRead(ctx, d, m)
}
func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
// NOTE: this function destroys the target Compute instance "permanently", so
// there is no way to restore it.
// If the compute being destroyed has extra disks attached, they are
// detached from the compute first
log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == "" {
if err != nil {
return diag.FromErr(err)
}
// the target Compute does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
c := m.(*controller.ControllerCfg)
model := ComputeGetResp{}
log.Debugf("resourceComputeDelete: ready to unmarshal string %s", compFacts)
err = json.Unmarshal([]byte(compFacts), &model)
if err == nil && len(model.Disks) > 0 {
// prepare to detach data disks from compute - do it only if compFacts unmarshalled
// properly and the resulting model contains non-empty Disks list
for _, diskFacts := range model.Disks {
if diskFacts.Type == "B" {
// boot disk is never detached on compute delete
continue
}
log.Debugf("resourceComputeDelete: ready to detach data disk ID %d from compute ID %s", diskFacts.ID, d.Id())
detachParams := &url.Values{}
detachParams.Add("computeId", d.Id())
detachParams.Add("diskId", fmt.Sprintf("%d", diskFacts.ID))
_, err = c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, detachParams)
if err != nil {
// We do not fail compute deletion on data disk detach errors
log.Errorf("resourceComputeDelete: error when detaching Disk ID %d: %s", diskFacts.ID, err)
}
}
}
params := &url.Values{}
params.Add("computeId", d.Id())
params.Add("permanently", "1")
// TODO: this is for the upcoming API update - params.Add("detachdisks", "1")
_, err = c.DecortAPICall(ctx, "POST", ComputeDeleteAPI, params)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceComputeExists(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
// Reminder: according to Terraform rules, this function should not modify its ResourceData argument
log.Debugf("resourceComputeExist: called for Compute name %s, RG ID %d",
d.Get("name").(string), d.Get("rg_id").(int))
compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == "" {
if err != nil {
return false, err
}
return false, nil
}
return true, nil
}
func ResourceCompute() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceComputeCreate,
ReadContext: resourceComputeRead,
UpdateContext: resourceComputeUpdate,
DeleteContext: resourceComputeDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout180s,
Read: &constants.Timeout30s,
Update: &constants.Timeout180s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the resource group where this compute should be deployed.",
},
"driver": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute),
Description: "Number of CPUs to allocate to this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
Description: "Amount of RAM in MB to allocate to this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the OS image to base this compute instance on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Required: true,
Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.",
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.",
},
"extra_disks": {
Type: schema.TypeSet,
Optional: true,
MaxItems: constants.MaxExtraDisksPerCompute,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.",
},
"network": {
Type: schema.TypeSet,
Optional: true,
MaxItems: constants.MaxNetworksPerCompute,
Elem: &schema.Resource{
Schema: networkSubresourceSchemaMake(),
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
/*
"ssh_keys": {
Type: schema.TypeList,
Optional: true,
MaxItems: MaxSshKeysPerCompute,
Elem: &schema.Resource{
Schema: sshSubresourceSchemaMake(),
},
Description: "SSH keys to authorize on this compute instance.",
},
*/
"description": {
Type: schema.TypeString,
Optional: true,
Description: "Optional text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Optional: true,
Default: "applied",
DiffSuppressFunc: cloudInitDiffSuppress,
Description: "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.",
},
// The rest are Compute properties, which are "computed" once it is created
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource group where this compute instance is located.",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account this compute instance belongs to.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account this compute instance belongs to.",
},
"boot_disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk ID.",
},
"os_users": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: osUsersSubresourceSchemaMake(),
},
Description: "Guest OS users provisioned on this compute instance.",
},
"started": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Is compute started.",
},
},
}
}

View File

@@ -0,0 +1,310 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package kvmvm
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
// d is filled with data according to the computeResource schema, so the extra disks config is retrieved via the "extra_disks" key.
// If do_delta is true, this function identifies changes between the new and existing specs for extra disks and tries to
// update the compute configuration accordingly.
// Otherwise it applies whatever is found in the new set of "extra_disks" right away.
// The primary use of do_delta=false is when calling this function from the compute Create handler.
// Note that this function does not abort on API errors - it continues to configure (attach/detach) the remaining
// disks via atomic API calls. However, it does not retry a failed manipulation on the same disk.
c := m.(*controller.ControllerCfg)
log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %t", d.Id(), do_delta)
// NB: as of rc-1.25 "extra_disks" is a TypeSet with an elem of TypeInt
old_set, new_set := d.GetChange("extra_disks")
apiErrCount := 0
var lastSavedError error
if !do_delta {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, disk := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach extra disk - partial resource update
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("utilityComputeExtraDisksConfigure: there were %d error(s) when attaching disks to Compute ID %s. Last error was: %s",
apiErrCount, d.Id(), lastSavedError)
return lastSavedError
}
return nil
}
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, diskId := range detach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, urlValues)
if err != nil {
// failed to detach disk - there will be partial resource update
log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, diskId := range attach_set.List() {
urlValues := &url.Values{}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAttachAPI, urlValues)
if err != nil {
// failed to attach disk - there will be partial resource update
log.Errorf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("utilityComputeExtraDisksConfigure: there were %d error(s) when managing disks of Compute ID %s. Last error was: %s",
apiErrCount, d.Id(), lastSavedError)
return lastSavedError
}
return nil
}
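// A worked example of the delta path above (hypothetical disk IDs): with state
// extra_disks = {101, 102} and config extra_disks = {102, 103}, the detach set
// old - new is {101} and the attach set new - old is {103}; disk 102 appears in
// both sets and is left untouched.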
func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
// If do_delta is true, this function will identify changes between new and existing specs for network and try to
// update compute configuration accordingly
// Otherwise it will apply whatever is found in the new set of "network" right away.
// Primary use of do_delta=false is when calling this function from compute Create handler.
c := m.(*controller.ControllerCfg)
old_set, new_set := d.GetChange("network")
apiErrCount := 0
var lastSavedError error
if !do_delta {
if new_set.(*schema.Set).Len() < 1 {
return nil
}
for _, runner := range new_set.(*schema.Set).List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netType", net_data["net_type"].(string))
urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
ipaddr, ipSet := net_data["ip_address"] // "ip_address" key is optional
if ipSet && ipaddr.(string) != "" {
// skip empty values so we do not send a blank ipAddr to the API (matches the delta path below)
urlValues.Add("ipAddr", ipaddr.(string))
}
_, err := c.DecortAPICall(ctx, "POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach network - partial resource update
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("utilityComputeNetworksConfigure: there were %d error(s) when managing networks of Compute ID %s. Last error was: %s",
apiErrCount, d.Id(), lastSavedError)
return lastSavedError
}
return nil
}
detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
for _, runner := range detach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("ipAddr", net_data["ip_address"].(string))
urlValues.Add("mac", net_data["mac"].(string))
_, err := c.DecortAPICall(ctx, "POST", ComputeNetDetachAPI, urlValues)
if err != nil {
// failed to detach this network - there will be partial resource update
log.Errorf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
for _, runner := range attach_set.List() {
urlValues := &url.Values{}
net_data := runner.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
urlValues.Add("netType", net_data["net_type"].(string))
if net_data["ip_address"].(string) != "" {
urlValues.Add("ipAddr", net_data["ip_address"].(string))
}
_, err := c.DecortAPICall(ctx, "POST", ComputeNetAttachAPI, urlValues)
if err != nil {
// failed to attach this network - there will be partial resource update
log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s",
net_data["net_id"].(int), net_data["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
if apiErrCount > 0 {
log.Errorf("utilityComputeNetworksConfigure: there were %d error(s) when managing networks of Compute ID %s. Last error was: %s",
apiErrCount, d.Id(), lastSavedError)
return lastSavedError
}
return nil
}
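// Note on the detach calls above: compute/netDetach identifies the connection by the
// ipAddr and mac recorded in state, while compute/netAttach takes netType/netId and an
// optional ipAddr. For illustration (hypothetical values), detaching one interface:
//
//	urlValues := &url.Values{}
//	urlValues.Add("computeId", "1234")
//	urlValues.Add("ipAddr", "192.168.1.3")
//	urlValues.Add("mac", "52:54:00:00:00:01")
//	_, err := c.DecortAPICall(ctx, "POST", ComputeNetDetachAPI, urlValues)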
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate Compute by one of the following approaches:
// - if compute_id is specified - locate by compute ID
// - if compute_name is specified - locate by a combination of compute name and resource
// group ID
//
// If it succeeds, it returns a non-empty string that contains JSON formatted facts about the
// Compute as returned by the compute/get API call.
// Otherwise it returns an empty string; err is non-nil if the lookup itself failed, and nil
// if the Compute simply does not exist (see the final return below).
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for resource's Exists method.
//
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
// make it possible to use "read" & "check presence" functions with compute ID set so
// that Import of Compute resource is possible
idSet := false
theId, err := strconv.Atoi(d.Id())
if err != nil || theId <= 0 {
computeId, argSet := d.GetOk("compute_id") // NB: compute_id is NOT present in computeResource schema!
if argSet {
theId = computeId.(int)
idSet = true
}
} else {
idSet = true
}
if idSet {
// compute ID is specified, try to get compute instance straight by this ID
log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", theId)
urlValues.Add("computeId", fmt.Sprintf("%d", theId))
computeFacts, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues)
if err != nil {
return "", err
}
return computeFacts, nil
}
// ID was not set in the schema upon entering this function - work through Compute name
// and RG ID
computeName, argSet := d.GetOk("name")
if !argSet {
return "", fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified")
}
rgId, argSet := d.GetOk("rg_id")
if !argSet {
return "", fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string))
}
urlValues.Add("rgId", fmt.Sprintf("%d", rgId))
apiResp, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues)
if err != nil {
return "", err
}
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp)
computeList := RgListComputesResp{}
err = json.Unmarshal([]byte(apiResp), &computeList)
if err != nil {
return "", err
}
// log.Printf("%#v", computeList)
log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(computeList))
for index, item := range computeList {
// we need to match the Compute by name, skipping Computes with the same name in DESTROYED status
if item.Name == computeName.(string) && item.Status != "DESTROYED" {
log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name)
// we found the Compute we need - now get detailed information via compute/get API
cgetValues := &url.Values{}
cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))
apiResp, err = c.DecortAPICall(ctx, "POST", ComputeGetAPI, cgetValues)
if err != nil {
return "", err
}
return apiResp, nil
}
}
return "", nil // there should be no error if Compute does not exist
}