Compare commits
3 Commits
| Author | SHA1 | Date |
|---|---|---|
| | f5e0a53364 | |
| | 9d1c8eeaa7 | |
| | 8516e0419a | |
CHANGELOG.md (54 changed lines)
@@ -1,49 +1,11 @@
-### Version 3.4.0
+### Version 3.4.3

### Features

- Add "seps" to the data source decort_account
- Add "seps" to the resource decort_account
- Add "shareable" to the data source decort_account_disk_list
- Change "compute_id" and "compute_name" to "computes" in data source decort_disks
- Change "compute_id" and "compute_name" to "computes" in resource decort_disks
- Change "compute_id" and "compute_name" to "computes" in data source decort_disks_list
- Add "shareable" to the data source decort_disk
- Add "shareable" to the resource decort_disk
- Add "shareable" to the data source decort_disk_list
- Add "present_to" to the data source decort_disk
- Add "present_to" to the data source decort_disk_list
- Add "present_to" to the resource decort_disk
- Add "shareable" to the data source decort_kvmvm
- Add "shareable" to the resource decort_kvmvm
- Add "size_max" to the data source decort_kvmvm
- Add "size_max" to the resource decort_kvmvm
- Add "size_used" to the data source decort_kvmvm
- Add "size_used" to the resource decort_kvmvm
- Add "present_to" to the data source decort_image
- Add "present_to" to the resource decort_image
- Add optional field "labels" to the resource decort_k8s
- Add optional field "taints" to the resource decort_k8s
- Add optional field "annotations" to the resource decort_k8s
- Add optional field "sep_id" in block masters in the resource decort_k8s
- Add optional field "sep_pool" in block masters in the resource decort_k8s
- Add optional field "sep_id" in block workers in the resource decort_k8s
- Add optional field "sep_pool" in block workers in the resource decort_k8s
- Add "gid" to the data source decort_resgroup
- Add "resources" to the data source decort_resgroup
- Add "status" to the data source decort_resgroup
- Add "vins" to the data source decort_resgroup
- Add "vms" to the data source decort_resgroup
- Add "gid" to the resource decort_resgroup
- Add "resources" to the resource decort_resgroup
- Add "status" to the resource decort_resgroup
- Add "vins" to the resource decort_resgroup
- Add "vms" to the resource decort_resgroup
- Add optional field "force" to the resource decort_resgroup
- Add optional field "permanently" to the resource decort_resgroup
- Add optional field "reason" to the resource decort_resgroup
- Change required field "account_id" to optional in data source decort_resgroup
- Change field type disksize from int to float in:
  - resource decort_resgroup
  - resource decort_account
  - data source decort_rg
  - data source decort_account
  - data source decort_account_rg_list
  - Models of the resources
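Most of the hunks that follow implement the last changelog item: disksize and size_used switch from int to float across schemas and JSON models. The practical reason is visible in the models: if the platform reports a fractional size, decoding it into an int field fails outright. A minimal standalone sketch of that failure mode (the sample payload is hypothetical; field names follow the Resource model in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down Resource model after the change: Disksize is float64,
// so fractional GB values decode cleanly.
type Resource struct {
	CPU      int     `json:"cpu"`
	Disksize float64 `json:"disksize"`
	RAM      int     `json:"ram"`
}

func main() {
	payload := []byte(`{"cpu": 4, "disksize": 2.5, "ram": 4096}`) // hypothetical API response
	var r Resource
	if err := json.Unmarshal(payload, &r); err != nil {
		// With Disksize declared as int, json.Unmarshal rejects 2.5 here.
		panic(err)
	}
	fmt.Printf("%+v\n", r)
}
```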
@@ -198,7 +198,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"disksize": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"extips": {
@@ -254,7 +254,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"disksize": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"extips": {
@@ -74,12 +74,41 @@ func flattenAccRGComputes(argc AccountRGComputes) []map[string]interface{} {
 	return res
 }
 
+func flattenAccResourceHack(r ResourceHack) []map[string]interface{} {
+	res := make([]map[string]interface{}, 0)
+	temp := map[string]interface{}{
+		"cpu":        r.CPU,
+		"disksize":   r.Disksize,
+		"extips":     r.Extips,
+		"exttraffic": r.Exttraffic,
+		"gpu":        r.GPU,
+		"ram":        r.RAM,
+		//"seps": flattenAccountSeps(r.SEPs),
+	}
+	res = append(res, temp)
+	return res
+}
+
+func flattenAccResourceRg(r Resource) []map[string]interface{} {
+	res := make([]map[string]interface{}, 0)
+	temp := map[string]interface{}{
+		"cpu":        r.CPU,
+		"disksize":   r.Disksize,
+		"extips":     r.Extips,
+		"exttraffic": r.Exttraffic,
+		"gpu":        r.GPU,
+		"ram":        r.RAM,
+	}
+	res = append(res, temp)
+	return res
+}
+
 func flattenAccRGResources(argr AccountRGResources) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0)
 	temp := map[string]interface{}{
-		"consumed": flattenAccResource(argr.Consumed),
-		"limits":   flattenAccResource(argr.Limits),
-		"reserved": flattenAccResource(argr.Reserved),
+		"consumed": flattenAccResourceRg(argr.Consumed),
+		"limits":   flattenAccResourceHack(argr.Limits),
+		"reserved": flattenAccResourceRg(argr.Reserved),
 	}
 	res = append(res, temp)
 	return res
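The new flatten helpers follow the standard Terraform SDK pattern: a nested API object becomes a one-element []map[string]interface{} so it can back a schema.TypeList attribute. A minimal sketch of that pattern (struct trimmed to three fields; the main wrapper is only for demonstration):

```go
package main

import "fmt"

// Resource mirrors a trimmed version of the model defined later in this diff.
type Resource struct {
	CPU      int     `json:"cpu"`
	Disksize float64 `json:"disksize"`
	Extips   int     `json:"extips"`
}

// flatten converts the struct into the single-element list shape
// that the Terraform SDK expects for a TypeList attribute.
func flatten(r Resource) []map[string]interface{} {
	return []map[string]interface{}{{
		"cpu":      r.CPU,
		"disksize": r.Disksize,
		"extips":   r.Extips,
	}}
}

func main() {
	fmt.Println(flatten(Resource{CPU: 2, Disksize: 10.5, Extips: 1}))
}
```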
@@ -96,7 +96,7 @@ type ResourceSep struct {
 
 type Resource struct {
 	CPU        int `json:"cpu"`
-	Disksize   int `json:"disksize"`
+	Disksize   float64 `json:"disksize"`
 	Extips     int `json:"extips"`
 	Exttraffic int `json:"exttraffic"`
 	GPU        int `json:"gpu"`
@@ -201,10 +201,19 @@ type AccountRGComputes struct {
 	Stopped int `json:"Stopped"`
 }
 
+type ResourceHack struct {
+	CPU        int     `json:"cpu"`
+	Disksize   float64 `json:"disksize"`
+	Extips     int     `json:"extips"`
+	Exttraffic int     `json:"exttraffic"`
+	GPU        int     `json:"gpu"`
+	RAM        int     `json:"ram"`
+}
+
 type AccountRGResources struct {
-	Consumed Resource `json:"Consumed"`
-	Limits   Resource `json:"Limits"`
-	Reserved Resource `json:"Reserved"`
+	Consumed Resource     `json:"Consumed"`
+	Limits   ResourceHack `json:"Limits"`
+	Reserved Resource     `json:"Reserved"`
 }
 
 type AccountRG struct {
@@ -574,7 +574,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"disksize": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"extips": {
@@ -630,7 +630,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"disksize": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"extips": {
@@ -385,7 +385,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 			Description: "Size in GB",
 		},
 		"size_used": {
-			Type:        schema.TypeInt,
+			Type:        schema.TypeFloat,
 			Computed:    true,
 			Description: "Number of used space, in GB",
 		},
@@ -481,7 +481,7 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 			Description: "Size in GB",
 		},
 		"size_used": {
-			Type:        schema.TypeInt,
+			Type:        schema.TypeFloat,
 			Computed:    true,
 			Description: "Number of used space, in GB",
 		},
@@ -415,7 +415,7 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
 			Description: "Size in GB",
 		},
 		"size_used": {
-			Type:        schema.TypeInt,
+			Type:        schema.TypeFloat,
 			Computed:    true,
 			Description: "Number of used space, in GB",
 		},
@@ -74,7 +74,7 @@ type Disk struct {
 	Shareable  bool `json:"shareable"`
 	SepID      int  `json:"sepId"` // NOTE: absent from compute/get output
 	SizeMax    int  `json:"sizeMax"`
-	SizeUsed   int  `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
+	SizeUsed   float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 	Snapshots  []Snapshot `json:"snapshots"`
 	Status     string     `json:"status"`
 	TechStatus string     `json:"techStatus"`
@@ -165,7 +165,7 @@ type Unattached struct {
 	Role       string `json:"role"`
 	SepID      int    `json:"sepId"`
 	SizeMax    int    `json:"sizeMax"`
-	SizeUsed   int    `json:"sizeUsed"`
+	SizeUsed   float64 `json:"sizeUsed"`
 	Snapshots  []Snapshot `json:"snapshots"`
 	Status     string     `json:"status"`
 	TechStatus string     `json:"techStatus"`
@@ -661,7 +661,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
 			Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
 		},
 		"size_used": {
-			Type:        schema.TypeInt,
+			Type:        schema.TypeFloat,
 			Computed:    true,
 			Description: "Number of used space, in GB",
 		},
@@ -52,4 +52,14 @@ const (
 	ComputeRestoreAPI = "/restmachine/cloudapi/compute/restore"
 	ComputeEnableAPI  = "/restmachine/cloudapi/compute/enable"
 	ComputeDisableAPI = "/restmachine/cloudapi/compute/disable"
+
+	//affinity and anti-affinity
+	ComputeAffinityLabelSetAPI       = "/restmachine/cloudapi/compute/affinityLabelSet"
+	ComputeAffinityLabelRemoveAPI    = "/restmachine/cloudapi/compute/affinityLabelRemove"
+	ComputeAffinityRuleAddAPI        = "/restmachine/cloudapi/compute/affinityRuleAdd"
+	ComputeAffinityRuleRemoveAPI     = "/restmachine/cloudapi/compute/affinityRuleRemove"
+	ComputeAffinityRulesClearAPI     = "/restmachine/cloudapi/compute/affinityRulesClear"
+	ComputeAntiAffinityRuleAddAPI    = "/restmachine/cloudapi/compute/antiAffinityRuleAdd"
+	ComputeAntiAffinityRuleRemoveAPI = "/restmachine/cloudapi/compute/antiAffinityRuleRemove"
+	ComputeAntiAffinityRulesClearAPI = "/restmachine/cloudapi/compute/antiAffinityRulesClear"
 )
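In the hunks below these constants are always invoked through the provider's helper as c.DecortAPICall(ctx, "POST", <API constant>, urlValues). As a rough standalone illustration of the wire format only (the base URL and the auth comment are assumptions, not taken from this diff):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

const ComputeAffinityLabelSetAPI = "/restmachine/cloudapi/compute/affinityLabelSet"

func main() {
	// Form-encoded parameters, mirroring the url.Values usage in the provider code.
	v := url.Values{}
	v.Add("computeId", "1234") // hypothetical compute ID
	v.Add("affinityLabel", "test4")

	base := "https://cloud.example.com" // hypothetical controller URL
	req, err := http.NewRequest("POST", base+ComputeAffinityLabelSetAPI, strings.NewReader(v.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// Authentication (e.g. a bearer token) would be added here in a real client.
	fmt.Println(req.Method, req.URL)
}
```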
@@ -386,7 +386,7 @@ func DataSourceCompute() *schema.Resource {
 			Computed: true,
 		},
 		"size_used": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"pool": {
@@ -74,7 +74,7 @@ type DiskRecord struct {
 	SepID      int  `json:"sepId"` // NOTE: absent from compute/get output
 	Shareable  bool `json:"shareable"`
 	SizeMax    int  `json:"sizeMax"`
-	SizeUsed   int  `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
+	SizeUsed   float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 	Snapshots  []SnapshotRecord `json:"snapshots"`
 	Status     string           `json:"status"`
 	TechStatus string           `json:"techStatus"`
@@ -225,6 +225,18 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
 	}
 
+	if !cleanup {
+		if affinityLabel, ok := d.GetOk("affinity_label"); ok {
+			affinityLabel := affinityLabel.(string)
+			urlValues := &url.Values{}
+			urlValues.Add("computeId", d.Id())
+			urlValues.Add("affinityLabel", affinityLabel)
+			_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityLabelSetAPI, urlValues)
+			if err != nil {
+				return diag.FromErr(err)
+			}
+			urlValues = &url.Values{}
+		}
 
 	if disks, ok := d.GetOk("disks"); ok {
 		log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", compId)
 		addedDisks := disks.([]interface{})
@@ -259,6 +271,52 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
 			}
 		}
 	}
 
+	if ars, ok := d.GetOk("affinity_rules"); ok {
+		log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", compId)
+		addedAR := ars.([]interface{})
+		if len(addedAR) > 0 {
+			for _, ar := range addedAR {
+				arConv := ar.(map[string]interface{})
+
+				urlValues.Add("computeId", d.Id())
+				urlValues.Add("topology", arConv["topology"].(string))
+				urlValues.Add("policy", arConv["policy"].(string))
+				urlValues.Add("mode", arConv["mode"].(string))
+				urlValues.Add("key", arConv["key"].(string))
+				urlValues.Add("value", arConv["value"].(string))
+				_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityRuleAddAPI, urlValues)
+				if err != nil {
+					cleanup = true
+					return diag.FromErr(err)
+				}
+				urlValues = &url.Values{}
+			}
+		}
+	}
+
+	if ars, ok := d.GetOk("anti_affinity_rules"); ok {
+		log.Debugf("resourceComputeCreate: Create anti affinity rules on ComputeID: %d", compId)
+		addedAR := ars.([]interface{})
+		if len(addedAR) > 0 {
+			for _, ar := range addedAR {
+				arConv := ar.(map[string]interface{})
+
+				urlValues.Add("computeId", d.Id())
+				urlValues.Add("topology", arConv["topology"].(string))
+				urlValues.Add("policy", arConv["policy"].(string))
+				urlValues.Add("mode", arConv["mode"].(string))
+				urlValues.Add("key", arConv["key"].(string))
+				urlValues.Add("value", arConv["value"].(string))
+				_, err := c.DecortAPICall(ctx, "POST", ComputeAntiAffinityRuleAddAPI, urlValues)
+				if err != nil {
+					cleanup = true
+					return diag.FromErr(err)
+				}
+				urlValues = &url.Values{}
+			}
+		}
+	}
+	}
 
 	log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
@@ -576,6 +634,155 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 		}
 	}
 
+	if d.HasChange("affinity_label") {
+		affinityLabel := d.Get("affinity_label").(string)
+		urlValues.Add("computeId", d.Id())
+		if affinityLabel == "" {
+			_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityLabelRemoveAPI, urlValues)
+			if err != nil {
+				return diag.FromErr(err)
+			}
+		}
+		urlValues.Add("affinityLabel", affinityLabel)
+		_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityLabelSetAPI, urlValues)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+		urlValues = &url.Values{}
+	}
+
+	if d.HasChange("affinity_rules") {
+		deletedAR := make([]interface{}, 0)
+		addedAR := make([]interface{}, 0)
+
+		oldAR, newAR := d.GetChange("affinity_rules")
+		oldConv := oldAR.([]interface{})
+		newConv := newAR.([]interface{})
+
+		if len(newConv) == 0 {
+			urlValues := &url.Values{}
+			urlValues.Add("computeId", d.Id())
+			_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityRulesClearAPI, urlValues)
+			if err != nil {
+				return diag.FromErr(err)
+			}
+		} else {
+			for _, el := range oldConv {
+				if !isContainsAR(newConv, el) {
+					deletedAR = append(deletedAR, el)
+				}
+			}
+			for _, el := range newConv {
+				if !isContainsAR(oldConv, el) {
+					addedAR = append(addedAR, el)
+				}
+			}
+
+			if len(deletedAR) > 0 {
+				urlValues := &url.Values{}
+				for _, ar := range deletedAR {
+					arConv := ar.(map[string]interface{})
+					urlValues.Add("computeId", d.Id())
+					urlValues.Add("topology", arConv["topology"].(string))
+					urlValues.Add("policy", arConv["policy"].(string))
+					urlValues.Add("mode", arConv["mode"].(string))
+					urlValues.Add("key", arConv["key"].(string))
+					urlValues.Add("value", arConv["value"].(string))
+					_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityRuleRemoveAPI, urlValues)
+					if err != nil {
+						return diag.FromErr(err)
+					}
+
+					urlValues = &url.Values{}
+				}
+			}
+			if len(addedAR) > 0 {
+				for _, ar := range addedAR {
+					arConv := ar.(map[string]interface{})
+					urlValues.Add("computeId", d.Id())
+					urlValues.Add("topology", arConv["topology"].(string))
+					urlValues.Add("policy", arConv["policy"].(string))
+					urlValues.Add("mode", arConv["mode"].(string))
+					urlValues.Add("key", arConv["key"].(string))
+					urlValues.Add("value", arConv["value"].(string))
+					_, err := c.DecortAPICall(ctx, "POST", ComputeAffinityRuleAddAPI, urlValues)
+					if err != nil {
+						return diag.FromErr(err)
+					}
+
+					urlValues = &url.Values{}
+				}
+			}
+		}
+
+	}
+
+	if d.HasChange("anti_affinity_rules") {
+		deletedAR := make([]interface{}, 0)
+		addedAR := make([]interface{}, 0)
+
+		oldAR, newAR := d.GetChange("anti_affinity_rules")
+		oldConv := oldAR.([]interface{})
+		newConv := newAR.([]interface{})
+
+		if len(newConv) == 0 {
+			urlValues := &url.Values{}
+			urlValues.Add("computeId", d.Id())
+			_, err := c.DecortAPICall(ctx, "POST", ComputeAntiAffinityRulesClearAPI, urlValues)
+			if err != nil {
+				return diag.FromErr(err)
+			}
+		} else {
+			for _, el := range oldConv {
+				if !isContainsAR(newConv, el) {
+					deletedAR = append(deletedAR, el)
+				}
+			}
+			for _, el := range newConv {
+				if !isContainsAR(oldConv, el) {
+					addedAR = append(addedAR, el)
+				}
+			}
+
+			if len(deletedAR) > 0 {
+				urlValues := &url.Values{}
+				for _, ar := range deletedAR {
+					arConv := ar.(map[string]interface{})
+					urlValues.Add("computeId", d.Id())
+					urlValues.Add("topology", arConv["topology"].(string))
+					urlValues.Add("policy", arConv["policy"].(string))
+					urlValues.Add("mode", arConv["mode"].(string))
+					urlValues.Add("key", arConv["key"].(string))
+					urlValues.Add("value", arConv["value"].(string))
+					_, err := c.DecortAPICall(ctx, "POST", ComputeAntiAffinityRuleRemoveAPI, urlValues)
+					if err != nil {
+						return diag.FromErr(err)
+					}
+
+					urlValues = &url.Values{}
+				}
+			}
+			if len(addedAR) > 0 {
+				for _, ar := range addedAR {
+					arConv := ar.(map[string]interface{})
+					urlValues.Add("computeId", d.Id())
+					urlValues.Add("topology", arConv["topology"].(string))
+					urlValues.Add("policy", arConv["policy"].(string))
+					urlValues.Add("mode", arConv["mode"].(string))
+					urlValues.Add("key", arConv["key"].(string))
+					urlValues.Add("value", arConv["value"].(string))
+					_, err := c.DecortAPICall(ctx, "POST", ComputeAntiAffinityRuleAddAPI, urlValues)
+					if err != nil {
+						return diag.FromErr(err)
+					}
+
+					urlValues = &url.Values{}
+				}
+			}
+		}
+
+	}
+
 	// we may reuse dataSourceComputeRead here as we maintain similarity
 	// between Compute resource and Compute data source schemas
 	return resourceComputeRead(ctx, d, m)
@@ -592,6 +799,21 @@ func isContainsDisk(els []interface{}, el interface{}) bool {
 	return false
 }
 
+func isContainsAR(els []interface{}, el interface{}) bool {
+	for _, elOld := range els {
+		elOldConv := elOld.(map[string]interface{})
+		elConv := el.(map[string]interface{})
+		if elOldConv["key"].(string) == elConv["key"].(string) &&
+			elOldConv["value"].(string) == elConv["value"].(string) &&
+			elOldConv["mode"].(string) == elConv["mode"].(string) &&
+			elOldConv["topology"].(string) == elConv["topology"].(string) &&
+			elOldConv["policy"].(string) == elConv["policy"].(string) {
+			return true
+		}
+	}
+	return false
+}
+
 func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	// NOTE: this function destroys target Compute instance "permanently", so
 	// there is no way to restore it.
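The update path above relies on isContainsAR to diff the old and new rule lists field by field: rules present only in the old list are removed, rules present only in the new list are added. A small standalone sketch of that set-difference logic (rule values are hypothetical; sameAR restates the comparison from isContainsAR for plain maps):

```go
package main

import "fmt"

// sameAR restates isContainsAR's field-by-field comparison for plain map values.
func sameAR(a, b map[string]string) bool {
	for _, k := range []string{"key", "value", "mode", "topology", "policy"} {
		if a[k] != b[k] {
			return false
		}
	}
	return true
}

func contains(rules []map[string]string, r map[string]string) bool {
	for _, old := range rules {
		if sameAR(old, r) {
			return true
		}
	}
	return false
}

func main() {
	oldRules := []map[string]string{
		{"key": "app", "value": "web", "mode": "EQ", "topology": "compute", "policy": "REQUIRED"},
	}
	newRules := []map[string]string{
		{"key": "app", "value": "db", "mode": "EQ", "topology": "compute", "policy": "REQUIRED"},
	}

	// Mirrors resourceComputeUpdate: old-only rules are removed, new-only rules are added.
	for _, r := range oldRules {
		if !contains(newRules, r) {
			fmt.Println("remove:", r)
		}
	}
	for _, r := range newRules {
		if !contains(oldRules, r) {
			fmt.Println("add:", r)
		}
	}
}
```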
@@ -665,6 +887,86 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
 			Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.",
 		},
 
+		"affinity_label": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "Set affinity label for compute",
+		},
+
+		"affinity_rules": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"topology": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false),
+						Description:  "compute or node, for whom rule applies",
+					},
+					"policy": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false),
+						Description:  "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule",
+					},
+					"mode": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false),
+						Description:  "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'",
+					},
+					"key": {
+						Type:        schema.TypeString,
+						Required:    true,
+						Description: "key that are taken into account when analyzing this rule will be identified",
+					},
+					"value": {
+						Type:        schema.TypeString,
+						Required:    true,
+						Description: "value that must match the key to be taken into account when analyzing this rule",
+					},
+				},
+			},
+		},
+
+		"anti_affinity_rules": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"topology": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false),
+						Description:  "compute or node, for whom rule applies",
+					},
+					"policy": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false),
+						Description:  "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule",
+					},
+					"mode": {
+						Type:         schema.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false),
+						Description:  "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'",
+					},
+					"key": {
+						Type:        schema.TypeString,
+						Required:    true,
+						Description: "key that are taken into account when analyzing this rule will be identified",
+					},
+					"value": {
+						Type:        schema.TypeString,
+						Required:    true,
+						Description: "value that must match the key to be taken into account when analyzing this rule",
+					},
+				},
+			},
+		},
+
 		"disks": {
 			Type:     schema.TypeList,
 			Computed: true,
@@ -703,7 +1005,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"size_used": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"pool": {
@@ -137,12 +137,12 @@ type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get
 }
 
 type ResourceRecord struct { // this is how actual usage is reported by /api/.../rg/get
-	Cpu        int `json:"cpu"`
-	Disk       int `json:"disksize"`
-	ExtIPs     int `json:"extips"`
-	ExtTraffic int `json:"exttraffic"`
-	Gpu        int `json:"gpu"`
-	Ram        int `json:"ram"`
+	Cpu        int     `json:"cpu"`
+	Disk       float64 `json:"disksize"`
+	ExtIPs     int     `json:"extips"`
+	ExtTraffic int     `json:"exttraffic"`
+	Gpu        int     `json:"gpu"`
+	Ram        int     `json:"ram"`
 }
 
 type UsageRecord struct {
@@ -157,7 +157,7 @@ type ResourceSep struct {
 
 type Resource struct {
 	CPU        int `json:"cpu"`
-	Disksize   int `json:"disksize"`
+	Disksize   float64 `json:"disksize"`
 	Extips     int `json:"extips"`
 	Exttraffic int `json:"exttraffic"`
 	GPU        int `json:"gpu"`
@@ -317,7 +317,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"size_used": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"snapshots": {
@@ -399,7 +399,7 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"size_used": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"snapshots": {
@@ -72,7 +72,7 @@ type Disk struct {
 	SepType    string `json:"sepType"`
 	SepID      int    `json:"sepId"` // NOTE: absent from compute/get output
 	SizeMax    int    `json:"sizeMax"`
-	SizeUsed   int    `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
+	SizeUsed   float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 	Snapshots  []Snapshot `json:"snapshots"`
 	Status     string     `json:"status"`
 	TechStatus string     `json:"techStatus"`
@@ -548,7 +548,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
 			Computed: true,
 		},
 		"size_used": {
-			Type:     schema.TypeInt,
+			Type:     schema.TypeFloat,
 			Computed: true,
 		},
 		"snapshots": {
@@ -72,7 +72,7 @@ type DiskRecord struct {
 	SepType    string `json:"sepType"`
 	SepID      int    `json:"sepId"` // NOTE: absent from compute/get output
 	SizeMax    int    `json:"sizeMax"`
-	SizeUsed   int    `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
+	SizeUsed   float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 	Snapshots  []SnapshotRecord `json:"snapshots"`
 	Status     string           `json:"status"`
 	TechStatus string           `json:"techStatus"`
@@ -115,10 +115,98 @@ resource "decort_kvmvm" "comp" {
 	#type - bool
 	permanently = false
 
+	#affinity rules
+	#optional parameter
+	#there may be one, several, or no blocks
+	#type - block
+	#affinity_rules {
+		#rule type
+		#possible values - compute or node
+		#required parameter
+		#type - string
+		#topology = "compute"
+
+		#rule strictness
+		#possible values - RECOMMENDED and REQUIRED
+		#required parameter
+		#type - string
+		#policy = "RECOMMENDED"
+
+		#check mode
+		#possible values - ANY, EQ, NE
+		#required parameter
+		#type - string
+		#mode = "ANY"
+
+		#rule key
+		#required parameter
+		#type - string
+		#key = "testkey"
+
+		#rule value
+		#required parameter
+		#type - string
+		#value = "testvalue"
+	#}
+
+	#anti-affinity rules
+	#optional parameter
+	#there may be one, several, or no blocks
+	#type - block
+	#anti_affinity_rules {
+		#rule type
+		#possible values - compute or node
+		#required parameter
+		#type - string
+		#topology = "compute"
+
+		#rule strictness
+		#possible values - RECOMMENDED and REQUIRED
+		#required parameter
+		#type - string
+		#policy = "RECOMMENDED"
+
+		#check mode
+		#possible values - ANY, EQ, NE
+		#required parameter
+		#type - string
+		#mode = "ANY"
+
+		#rule key
+		#required parameter
+		#type - string
+		#key = "testkey"
+
+		#rule value
+		#required parameter
+		#type - string
+		#value = "testvalue"
+	#}
+
+	#flag marking the compute as available for operations
+	#optional parameter
+	#type - bool
+	#enable = true
+
+	#set a label for the VM
+	#optional parameter
+	#type - string
+	#affinity_label = "test4"
+
+	#system name
+	#optional parameter
+	#used when creating a VM
+	#unset by default
+	#type - string
+	#is=""
+
+	#VM purpose
+	#optional parameter
+	#used when creating a VM
+	#unset by default
+	#type - string
+	#ipa_type = ""
 }