Adding data_source_disk code and implementing collateral changes. No testing yet.

rc-1.0
Sergey Shubin svs1370 4 years ago
parent 1490c543de
commit 4f617334be

@@ -205,7 +205,7 @@ func dataSourceCompute() *schema.Resource {
 Type: schema.TypeList,
 Computed: true,
 Elem: &schema.Resource {
-Schema: diskSubresourceSchema(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
+Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
 },
 Description: "Detailed specification for all disks attached to this compute instance (including boot disk).",
 },

@@ -0,0 +1,204 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"log"
// "net/url"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
func flattenDisk(d *schema.ResourceData, disk_facts string) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
model := DiskRecord{}
log.Debugf("flattenDisk: ready to unmarshal string %q", disk_facts)
err := json.Unmarshal([]byte(disk_facts), &model)
if err != nil {
return err
}
log.Debugf("flattenDisk: disk ID %d, disk AccountID %d", model.ID, model.AccountID)
d.SetId(fmt.Sprintf("%d", model.ID))
d.Set("disk_id", model.ID)
d.Set("name", model.Name)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)
d.Set("size", model.SizeMax)
d.Set("type", model.Type)
d.Set("image_id", model.ImageID)
d.Set("sep_id", model.SepID)
d.Set("sep_type", model.SepType)
d.Set("pool", model.Pool)
d.Set("compute_id", model.ComputeID)
d.Set("description", model.Desc)
d.Set("status", model.Status)
d.Set("tech_status", model.TechStatus)
/* we do not manage snapshots via Terraform yet, so keep this commented out for a while
if len(model.Snapshots) > 0 {
log.Debugf("flattenDisk: calling flattenDiskSnapshots")
if err = d.Set("nics", flattenDiskSnapshots(model.Snapshots)); err != nil {
return err
}
}
*/
return nil
}
func dataSourceDiskRead(d *schema.ResourceData, m interface{}) error {
disk_facts, err := utilityDiskCheckPresence(d, m)
if disk_facts == "" {
// if empty string is returned from utilityDiskCheckPresence then there is no
// such Disk and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return err
}
return flattenDisk(d, disk_facts)
}
func dataSourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this disk. NOTE: disk names are NOT unique within an account.",
},
"disk_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the disk to get. If disk ID is specified, then name, account and account ID are ignored.",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account this disk belongs to.",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of the account this disk belongs to. If account ID is specified, account name is ignored.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this disk.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the image, which this disk was cloned from.",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of the disk in GB.",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of this disk.",
},
/*
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource {
Schema: snapshotSubresourceSchemaMake(),
},
Description: "List of user-created snapshots for this disk."
},
*/
"sep_id": {
Type: schema.TypeString,
Computed: true,
Description: "Storage end-point provider serving this disk.",
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
Description: "Type of the storage end-point provider serving this disk.",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool where this disk is located.",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current model status of this disk.",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Current technical status of this disk.",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the compute instance where this disk is attached to, or 0 for unattached disk.",
},
}
return rets
}
func dataSourceDisk() *schema.Resource {
return &schema.Resource {
SchemaVersion: 1,
Read: dataSourceDiskRead,
Timeouts: &schema.ResourceTimeout {
Read: &Timeout30s,
Default: &Timeout60s,
},
Schema: dataSourceDiskSchemaMake(),
}
}
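
The data source defined above is only reachable from Terraform configurations after the provider registers it in its DataSourcesMap; that wiring is not part of this diff. Below is a minimal registration sketch, where the helper name providerDataSources and the key "decort_disk" are assumptions for illustration only.

// Hypothetical sketch (not part of this commit): expose the new data source
// through the provider's DataSourcesMap under an assumed registration key.
func providerDataSources() map[string]*schema.Resource {
    return map[string]*schema.Resource{
        "decort_disk": dataSourceDisk(), // assumed key; the real name is set where the provider is declared
    }
}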

@@ -168,7 +168,7 @@ func dataSourceResgroup() *schema.Resource {
 Elem: &schema.Schema {
 Type: schema.TypeInt,
 },
-Description: "List of computes deployed in this resource group."
+Description: "List of computes deployed in this resource group.",
 },
 },
 }

@@ -95,7 +95,7 @@ func diskSubresourceSchema() map[string]*schema.Schema {
 "name": {
 Type: schema.TypeString,
 Required: true,
-Description: "Name of this disk resource.",
+Description: "Name of this disk.",
 },
 "size": {
@@ -108,28 +108,34 @@ func diskSubresourceSchema() map[string]*schema.Schema {
 "account_id": {
 Type: schema.TypeInt,
 Computed: true,
-Description: "ID of the account this disk resource belongs to.",
+Description: "ID of the account this disk belongs to.",
 },
+"type": {
+Type: schema.TypeString,
+Optional: true,
+Description: "Type of this disk.",
+},
 "sep_id": {
 Type: schema.TypeString,
 Optional: true,
 Default: "default",
-Description: "Storage provider (storage technology type) by which this disk should be served.",
+Description: "ID of the storage end-point provider serving this disk.",
 },
 "sep_type": {
 Type: schema.TypeString,
 Optional: true,
 Default: "default",
-Description: "Storage provider (storage technology type) by which this disk should be served.",
+Description: "Type of the storage provider serving this disk.",
 },
 "pool": {
 Type: schema.TypeString,
 Optional: true,
 Default: "default",
-Description: "Pool from which this disk should be provisioned.",
+Description: "Pool on the storage where this disk is located.",
 },
 "image_id": {

@@ -290,10 +290,11 @@ type DiskRecord struct {
 // ACLs `json:"ACL"` - it is a dictionary, special parsing required
 // was - Acl map[string]string `json:"acl"`
 AccountID int `json:"accountId"`
+AccountName string `json:"accountName"` // NOTE: absent from compute/get output
 BootPartition int `json:"bootPartition"`
 CreatedTime uint64 `json:"creationTime"`
 DeletedTime uint64 `json:"deletionTime"`
-Description string `json:"descr"`
+Desc string `json:"descr"`
 DestructionTime uint64 `json:"destructionTime"`
 DiskPath string `json:"diskPath"`
 GridID int `json:"gid"`
@@ -302,6 +303,7 @@ type DiskRecord struct {
 Images []int `json:"images"`
 // IOTune 'json:"iotune" - it is a dictionary
 Name string `json:"name"`
+// Order `json:"order"`
 ParentId int `json:"parentId"`
 PciSlot int `json:"pciSlot"`
 // ResID string `json:"resId"`
@@ -313,12 +315,12 @@ type DiskRecord struct {
 SepType string `json:"sepType"`
 SepID int `json:"sepid"`
 SizeMax int `json:"sizeMax"`
-SizeUsed int `json:"sizeUsed"`
+SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 Snapshots []SnapshotRecord `json:"snapshots"`
 Status string `json:"status"`
 TechStatus string `json:"techStatus"`
 Type string `json:"type"`
-ComputeID int `json:"vmId"`
+ComputeID int `json:"vmid"`
 }
 type OsUserRecord struct {
@@ -520,3 +522,5 @@ type DisksGetParam struct {
 const DisksCreateAPI = "/restmachine/cloudapi/disks/create"
 const DisksGetAPI = "/restmachine/cloudapi/disks/get" // Returns single DiskRecord on success
+const DisksListAPI = "/restmachine/cloudapi/disks/list" // Returns list of DiskRecord on success

@@ -363,7 +363,7 @@ func resourceResgroup() *schema.Resource {
 "desc": {
 Type: schema.TypeString,
 Optional: true,
-Description: "User-defined text description of this resource group."
+Description: "User-defined text description of this resource group.",
 },
 "status": {
@@ -379,22 +379,22 @@ func resourceResgroup() *schema.Resource {
 },
 "vins": {
-Type: schema.TypeList,
+Type: schema.TypeList, // this is a list of ints
 Computed: true,
 MaxItems: LimitMaxVinsPerResgroup,
-Elem: &schema.Resource {
-Schema: vinsRgSubresourceSchema() // this is a list of ints
+Elem: &schema.Schema {
+Type: schema.TypeInt,
 },
 Description: "List of VINs deployed in this resource group.",
 },
 "computes": {
-Type: schema.TypeList,
+Type: schema.TypeList, // this is a list of ints
 Computed: true,
-Elem: &schema.Resource {
-Schema: computesRgSubresourceSchema() //this is a list of ints
+Elem: &schema.Schema {
+Type: schema.TypeInt,
 },
-Description: "List of computes deployed in this resource group."
+Description: "List of computes deployed in this resource group.",
 },
 },
 }
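
With Elem switched to &schema.Schema{Type: schema.TypeInt}, "vins" and "computes" are now flat lists of integer IDs rather than lists of sub-resources. A minimal sketch of how such a list could be filled during read, assuming a hypothetical helper and an assumed model field holding the IDs:

// Hypothetical helper (not part of this commit): convert a slice of IDs into
// the []interface{} value that d.Set expects for a TypeList of TypeInt elements.
func flattenIntList(ids []int) []interface{} {
    result := make([]interface{}, 0, len(ids))
    for _, id := range ids {
        result = append(result, id)
    }
    return result
}

// usage sketch inside a read/flatten function:
// d.Set("computes", flattenIntList(model.Computes)) // model.Computes is an assumed field name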

@@ -0,0 +1,120 @@
/*
Copyright (c) 2019-2021 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Author: Sergey Shubin, <sergey.shubin@digitalenergy.online>, <svs1370@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file is part of Terraform (by Hashicorp) provider for Digital Energy Cloud Orchestration
Technology platform.
Visit https://github.com/rudecs/terraform-provider-decort for full source code package and updates.
*/
package decort
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
// "github.com/hashicorp/terraform/helper/validation"
)
func utilityDiskCheckPresence(d *schema.ResourceData, m interface{}) (string, error) {
// This function tries to locate Disk by one of the following algorithms depending on
// the parameters passed:
// - if disk ID is specified -> by disk ID
// - if disk name is specified -> by disk name and either account ID or account name
//
// NOTE: disk names are not unique, so the first occurrence of this name in the account will
// be returned. There is no such ambiguity when locating disk by its ID.
//
// If succeeded, it returns non empty string that contains JSON formatted facts about the disk
// as returned by disks/get API call.
// Otherwise it returns empty string and meaningful error.
//
// This function does not modify its ResourceData argument, so it is safe to use it as core
// method for resource's Exists method.
//
controller := m.(*ControllerCfg)
url_values := &url.Values{}
disk_id, arg_set := d.GetOk("disk_id")
if arg_set {
// go straight for the disk by its ID
log.Debugf("utilityDiskCheckPresence: locating disk by its ID %d", disk_id.(int))
url_values.Add("diskId", fmt.Sprintf("%d", disk_id.(int)))
disk_facts, err := controller.decortAPICall("POST", DisksGetAPI, url_values)
if err != nil {
return "", err
}
return disk_facts, nil
}
disk_name, arg_set := d.GetOk("name")
if !arg_set {
// no disk ID and no disk name - we cannot locate disk in this case
return "", fmt.Error("Cannot locate disk if name is empty and no disk ID specified.")
}
account_id, acc_id_set := d.GetOk("account_id")
account_name, acc_name_set := d.GetOk("account_name")
if !acc_id_set && !acc_name_set {
return "", fmt.Errorf("Cannot locate disk by name %s if neither account ID nor account name are set", disk_name.(string))
}
if acc_id_set {
url_values.Add("accountId", fmt.Sprintf("%d", account_id.(int)))
}
disk_facts, err := controller.decortAPICall("POST", DisksListAPI, url_values)
if err != nil {
return "", err
}
log.Debugf("utilityDiskCheckPresence: ready to unmarshal string %q", disk_facts)
disks_list := []DiskRecord{}
err = json.Unmarshal([]byte(disk_facts), &disks_list)
if err != nil {
return "", err
}
// log.Printf("%#v", vm_list)
log.Debugf("utilityDiskCheckPresence: traversing decoded JSON of length %d", len(disks_list))
for index, item := range disks_list {
// need to match disk by name (and by account name, when account ID is not set), return the first match
if item.Name == disk_name.(string) && item.Status != "DESTROYED" &&
(acc_id_set || item.AccountName == account_name.(string)) {
log.Printf("utilityDiskCheckPresence: index %d, matched disk name %q", index, item.Name)
// we found the disk we need - now get detailed information via API call to disks/get
// TODO: this may not be optimal as it initiates one extra call to the DECORT controller
// in spite of the fact that we already have all required information about the disk in
// item variable
get_url_values := &url.Values{}
get_url_values.Add("diskId", fmt.Sprintf("%d", item.ID))
disk_facts, err = controller.decortAPICall("POST", DisksGetAPI, get_url_values)
if err != nil {
return "", err
}
return disk_facts, nil
}
}
return "", nil // there should be no error if disk does not exist
}
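
Since utilityDiskCheckPresence does not modify its ResourceData argument (as its header comment notes), it can also back an Exists-style callback. A minimal sketch of such a wrapper, with resourceDiskExists being a hypothetical name for a future disk resource:

// Hypothetical Exists-style wrapper (not part of this commit).
func resourceDiskExists(d *schema.ResourceData, m interface{}) (bool, error) {
    disk_facts, err := utilityDiskCheckPresence(d, m)
    if disk_facts == "" {
        // empty result means the disk was not located; err, if non-nil, explains why
        return false, err
    }
    return true, nil
}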