4.10.1
@@ -127,6 +127,22 @@ func resourceLimitsSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeFloat,
            Computed: true,
        },
        "storage_policy": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "id": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "limit": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
    }

    return res
@@ -288,17 +304,6 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
        // 	Schema: resourcesSchemaMake(),
        // },
        // },
        "ckey": {
            Type: schema.TypeString,
            Computed: true,
        },
        "meta": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeString,
            },
        },
        "zone_ids": {
            Type: schema.TypeList,
            Computed: true,
@@ -388,10 +393,6 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeBool,
            Computed: true,
        },
        "service_account": {
            Type: schema.TypeBool,
            Computed: true,
        },
        "status": {
            Type: schema.TypeString,
            Computed: true,
@@ -437,6 +438,13 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeFloat,
            Computed: true,
        },
        "storage_policy_ids": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeInt,
            },
        },
    }
    return res
}

@@ -116,6 +116,22 @@ func dataSourceResourceLimitsSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeFloat,
            Computed: true,
        },
        "storage_policy": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "id": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "limit": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
    }

    return res
@@ -55,6 +55,7 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
            "account_name": acc.Name,
            "status": acc.Status,
            "updated_time": acc.UpdatedTime,
            "zone_ids": acc.ZoneIDs,
        }
        res = append(res, temp)
    }
@@ -129,6 +130,11 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "Page size",
        },
        "zone_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "Zone ID",
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,
@@ -201,6 +207,13 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "zone_ids": {
                        Type: schema.TypeList,
                        Computed: true,
                        Elem: &schema.Schema{
                            Type: schema.TypeInt,
                        },
                    },
                },
            },
        },

@@ -139,6 +139,35 @@ func dataSourceAccResourceSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeInt,
            Computed: true,
        },

        "policies": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "id": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "disk_size": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "disk_size_max": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "seps": {
                        Type: schema.TypeList,
                        Computed: true,
                        Elem: &schema.Resource{
                            Schema: dataSourceSepsSchemaMake(),
                        },
                    },
                },
            },
        },

        "ram": {
            Type: schema.TypeInt,
            Computed: true,
@@ -8,8 +8,8 @@ import (
func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
    d.Set("dc_location", acc.DCLocation)
    d.Set("desc", acc.Description)
    //TODO
    // d.Set("resources", flattenAccResources(acc.Resources))
    d.Set("ckey", acc.CKey)
    d.Set("acl", flattenAccAcl(acc.ACL))
    d.Set("company", acc.Company)
    d.Set("companyurl", acc.CompanyURL)
@@ -37,6 +37,7 @@ func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
    d.Set("cpu_allocation_parameter", acc.CPUAllocationParameter)
    d.Set("cpu_allocation_ratio", acc.CPUAllocationRatio)
    d.Set("default_zone_id", acc.DefaultZoneID)
    d.Set("storage_policy_ids", acc.StoragePolicyIDs)
    d.Set("zone_ids", flattenZones(acc.ZoneIDs))

    if username, ok := d.GetOk("username"); ok {
@@ -48,6 +49,44 @@ func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
    return nil
}

func flattenAccountResource(d *schema.ResourceData, acc account.RecordAccount) error {
    d.Set("dc_location", acc.DCLocation)
    d.Set("desc", acc.Description)
    //TODO
    // d.Set("resources", flattenAccResources(acc.Resources))
    d.Set("acl", flattenAccAcl(acc.ACL))
    d.Set("company", acc.Company)
    d.Set("companyurl", acc.CompanyURL)
    d.Set("compute_features", acc.ComputeFeatures)
    d.Set("created_by", acc.CreatedBy)
    d.Set("created_time", acc.CreatedTime)
    d.Set("deactivation_time", acc.DeactivationTime)
    d.Set("desc", acc.Description)
    d.Set("deleted_by", acc.DeletedBy)
    d.Set("deleted_time", acc.DeletedTime)
    d.Set("displayname", acc.DisplayName)
    d.Set("enable", flattenEnabled(acc.Status))
    d.Set("guid", acc.GUID)
    d.Set("account_id", acc.ID)
    d.Set("account_name", acc.Name)
    d.Set("resource_limits", flattenAccResourceLimits(acc.ResourceLimits))
    d.Set("send_access_emails", acc.SendAccessEmails)
    d.Set("status", acc.Status)
    d.Set("updated_time", acc.UpdatedTime)
    d.Set("version", acc.Version)
    d.Set("vins", acc.VINS)
    d.Set("vinses", acc.VINSes)
    d.Set("computes", flattenAccComputes(acc.Computes))
    d.Set("machines", flattenAccMachines(acc.Machines))
    d.Set("cpu_allocation_parameter", acc.CPUAllocationParameter)
    d.Set("cpu_allocation_ratio", acc.CPUAllocationRatio)
    d.Set("default_zone_id", acc.DefaultZoneID)
    d.Set("storage_policy_ids", acc.StoragePolicyIDs)
    d.Set("zone_ids", flattenZonesInResource(acc.ZoneIDs))

    return nil
}

func flattenEnabled(status string) bool {
    return status == "CONFIRMED"
}
@@ -100,20 +139,32 @@ func flattenResourceConsumption(d *schema.ResourceData, acc *account.RecordResou
func flattenAccResourceLimits(rl account.ResourceLimits) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    temp := map[string]interface{}{
        "cu_c": rl.CUC,
        "cu_d": rl.CUD,
        "cu_dm": rl.CUDM,
        "cu_i": rl.CUI,
        "cu_m": rl.CUM,
        "cu_np": rl.CUNP,
        "gpu_units": rl.GPUUnits,
        "storage_policy": flattenSTPolicy(rl.StoragePolicy),
    }
    res = append(res, temp)

    return res
}

func flattenSTPolicy(ast []account.StoragePolicyItem) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(ast))
    for _, item := range ast {
        temp := map[string]interface{}{
            "id": item.ID,
            "limit": item.Limit,
        }
        res = append(res, temp)
    }
    return res
}

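As a reading aid, a minimal illustrative sketch (not part of the commit) of the shape flattenAccResourceLimits produces; the account.ResourceLimits / account.StoragePolicyItem field names are the ones already used in the calls above, the literal values and the fmt call are made up:

    // Illustrative only — would live in the same package as the flatteners above.
    // Prints the nested "storage_policy" list that lines up with the Computed
    // "storage_policy" schema added earlier in this commit.
    rl := account.ResourceLimits{
        CUC:           4,
        CUM:           8192,
        StoragePolicy: []account.StoragePolicyItem{{ID: 1, Limit: 100}},
    }
    limits := flattenAccResourceLimits(rl)
    fmt.Println(limits[0]["storage_policy"]) // [map[id:1 limit:100]]
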
// func flattenAccResources(r account.Resources) []map[string]interface{} {
|
||||
// res := make([]map[string]interface{}, 0)
|
||||
// temp := map[string]interface{}{
|
||||
@@ -151,11 +202,26 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
        "gpu": r.GPU,
        "ram": r.RAM,
        "seps": flattenAccountSeps(r.SEPs),
        "policies": flattenAccountPolicies(r.Policies),
    }
    res = append(res, temp)
    return res
}

func flattenAccountPolicies(policies map[string]account.Policy) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    for k, dataVal := range policies {
        temp := map[string]interface{}{
            "id": k,
            "disk_size": dataVal.DiskSize,
            "disk_size_max": dataVal.DiskSizeMax,
            "seps": flattenAccountSeps(dataVal.SEPs),
        }
        res = append(res, temp)
    }
    return res
}

func flattenAccResourceConsumption(lrc *account.ListResourceConsumption) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(lrc.Data))
    for _, rc := range lrc.Data {
@@ -180,3 +246,11 @@ func flattenZones(zones []account.ZoneID) []map[string]interface{} {
    }
    return res
}

func flattenZonesInResource(zones []account.ZoneID) []int64 {
    res := make([]int64, 0)
    for _, zone := range zones {
        res = append(res, zone.ID)
    }
    return res
}

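The two zone helpers differ only in output shape: flattenZones (called from flattenAccount above) emits one object per zone for the nested zone_ids blocks, while flattenZonesInResource (called from flattenAccountResource) returns the flat list of IDs matching the resource schema's zone_ids list of integers. A tiny hedged illustration with made-up IDs; account.ZoneID and its ID field come from the SDK calls already shown:

    zones := []account.ZoneID{{ID: 10}, {ID: 20}}
    _ = flattenZones(zones)           // []map[string]interface{} — one object per zone
    _ = flattenZonesInResource(zones) // []int64{10, 20} — plain ID list for the resource
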
@@ -100,7 +100,7 @@ func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interfac
        }
    }

    flattenAccount(d, *acc)
    flattenAccountResource(d, *acc)

    return nil
}
@@ -495,7 +495,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
        },
        "zone_ids": {
            Type: schema.TypeList,
            Optional: true,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeInt,
@@ -535,6 +534,10 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
                        Optional: true,
                        Computed: true,
                    },
                    "cu_dm": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "cu_i": {
                        Type: schema.TypeFloat,
                        Optional: true,
@@ -555,6 +558,22 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
                        Optional: true,
                        Computed: true,
                    },
                    "storage_policy": {
                        Type: schema.TypeList,
                        Computed: true,
                        Elem: &schema.Resource{
                            Schema: map[string]*schema.Schema{
                                "id": {
                                    Type: schema.TypeInt,
                                    Computed: true,
                                },
                                "limit": {
                                    Type: schema.TypeInt,
                                    Computed: true,
                                },
                            },
                        },
                    },
                },
            },
        },
@@ -573,17 +592,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
        // 	Schema: resourcesSchemaMake(),
        // },
        // },
        "ckey": {
            Type: schema.TypeString,
            Computed: true,
        },
        // "meta": {
        // 	Type: schema.TypeList,
        // 	Computed: true,
        // 	Elem: &schema.Schema{
        // 		Type: schema.TypeString,
        // 	},
        // },
        "acl": {
            Type: schema.TypeList,
            Computed: true,
@@ -634,10 +642,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeInt,
            Computed: true,
        },
        "service_account": {
            Type: schema.TypeBool,
            Computed: true,
        },
        "status": {
            Type: schema.TypeString,
            Computed: true,
@@ -683,6 +687,13 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeFloat,
            Computed: true,
        },
        "storage_policy_ids": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeInt,
            },
        },
    }
}

@@ -73,6 +73,10 @@ func utilityAccountListCheckPresence(ctx context.Context, d *schema.ResourceData
        req.SortBy = sortBy.(string)
    }

    if zoneID, ok := d.GetOk("zone_id"); ok {
        req.ZoneID = uint64(zoneID.(int))
    }

    log.Debugf("utilityAccountListCheckPresence: load account list")
    accountList, err := c.CloudAPI().Account().List(ctx, req)
    if err != nil {
@@ -75,7 +75,7 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
            Required: true,
            Description: "audit guid",
        },

        "args": {
            Type: schema.TypeString,
            Computed: true,
@@ -84,6 +84,10 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeString,
            Computed: true,
        },
        "correlation_id": {
            Type: schema.TypeString,
            Computed: true,
        },
        "guid": {
            Type: schema.TypeString,
            Computed: true,
internal/service/cloudapi/audit/data_source_audit_list.go (new file, 244 lines)
@@ -0,0 +1,244 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package audit

import (
    "context"

    "github.com/google/uuid"
    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAuditListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    auditList, err := utilityAuditListCheckPresence(ctx, d, m)
    if err != nil {
        d.SetId("")
        return diag.FromErr(err)
    }

    id := uuid.New()
    d.SetId(id.String())
    d.Set("items", flattenAuditList(auditList))
    d.Set("entry_count", auditList.EntryCount)

    return nil
}

func DataSourceAuditList() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        ReadContext: dataSourceAuditListRead,

        Timeouts: &schema.ResourceTimeout{
            Read:    &constants.Timeout30s,
            Default: &constants.Timeout60s,
        },

        Schema: dataSourceAuditListSchemaMake(),
    }
}

func dataSourceAuditListSchemaMake() map[string]*schema.Schema {
    return map[string]*schema.Schema{
        "timestamp_at": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "find all audits after point in time (unixtime)",
        },
        "timestamp_to": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "find all audits before point in time (unixtime)",
        },
        "user": {
            Type: schema.TypeString,
            Optional: true,
            Description: "find by user (Mongo RegExp supported)",
        },
        "call": {
            Type: schema.TypeString,
            Optional: true,
            Description: "find by api endpoint (Mongo RegExp supported)",
        },
        "min_status_code": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "find by HTTP min status code",
        },
        "max_status_code": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "find by HTTP max status code",
        },
        "sort_by": {
            Type: schema.TypeString,
            Optional: true,
            Description: "sort by one of supported fields, format +|-(field)",
        },
        "page": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "page number",
        },
        "request_id": {
            Type: schema.TypeString,
            Optional: true,
            Description: "request id",
        },
        "size": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "page size",
        },
        "resgroup_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "compute_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "account_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "vins_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "service_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "k8s_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "flipgroup_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "lb_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "sep_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "exclude_audit_lines": {
            Type: schema.TypeBool,
            Optional: true,
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "account_id": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "compute_id": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "resgroup_id": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "call": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "correlation_id": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "guid": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "responsetime": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "status_code": {
                        Type: schema.TypeInt,
                        Computed: true,
                    },
                    "timestamp": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "user": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "ttl": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "args": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "kwargs": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "result": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                    "timestamp_end": {
                        Type: schema.TypeFloat,
                        Computed: true,
                    },
                    "remote_addr": {
                        Type: schema.TypeString,
                        Computed: true,
                    },
                },
            },
        },
        "entry_count": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "entry count",
        },
    }
}
@@ -43,6 +43,7 @@ func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {

    d.Set("args", au.Arguments)
    d.Set("call", au.Call)
    d.Set("correlation_id", au.CorrelationID)
    d.Set("guid", au.GUID)
    d.Set("kwargs", au.Kwargs)
    d.Set("remote_addr", au.RemoteAddr)
@@ -54,3 +55,26 @@ func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
    d.Set("timestamp_end", au.TimestampEnd)
    d.Set("user", au.User)
}

func flattenAuditList(au *audit.ListAudits) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(au.Data))
    for _, item := range au.Data {
        temp := map[string]interface{}{
            "args": item.Args,
            "call": item.Call,
            "correlation_id": item.CorrelationID,
            "guid": item.GUID,
            "kwargs": item.Kwargs,
            "remote_addr": item.RemoteAddr,
            "result": item.Result,
            "responsetime": item.ResponseTime,
            "status_code": item.StatusCode,
            "timestamp": item.Timestamp,
            "timestamp_end": item.TimestampEnd,
            "ttl": item.TTL,
            "user": item.User,
        }
        res = append(res, temp)
    }
    return res
}

internal/service/cloudapi/audit/utility_audit_list.go (new file, 117 lines)
@@ -0,0 +1,117 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package audit

import (
    "context"

    log "github.com/sirupsen/logrus"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityAuditListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*audit.ListAudits, error) {
    c := m.(*controller.ControllerCfg)
    req := audit.ListRequest{}

    if timestampAt, ok := d.GetOk("timestamp_at"); ok {
        req.TimestampAt = uint64(timestampAt.(int))
    }
    if timestampTo, ok := d.GetOk("timestamp_to"); ok {
        req.TimestampTo = uint64(timestampTo.(int))
    }
    if user, ok := d.GetOk("user"); ok {
        req.User = user.(string)
    }
    if call, ok := d.GetOk("call"); ok {
        req.Call = call.(string)
    }
    if minStatusCode, ok := d.GetOk("min_status_code"); ok {
        req.MinStatusCode = uint64(minStatusCode.(int))
    }
    if maxStatusCode, ok := d.GetOk("max_status_code"); ok {
        req.MaxStatusCode = uint64(maxStatusCode.(int))
    }
    if sortBy, ok := d.GetOk("sort_by"); ok {
        req.SortBy = sortBy.(string)
    }
    if Page, ok := d.GetOk("page"); ok {
        req.Page = uint64(Page.(int))
    }
    if RequestID, ok := d.GetOk("request_id"); ok {
        req.RequestID = RequestID.(string)
    }
    if Size, ok := d.GetOk("size"); ok {
        req.Size = uint64(Size.(int))
    }
    if resgroupID, ok := d.GetOk("resgroup_id"); ok {
        req.RGID = uint64(resgroupID.(int))
    }
    if computeID, ok := d.GetOk("compute_id"); ok {
        req.ComputeID = uint64(computeID.(int))
    }
    if accountID, ok := d.GetOk("account_id"); ok {
        req.AccountID = uint64(accountID.(int))
    }
    if vinsID, ok := d.GetOk("vins_id"); ok {
        req.VINSID = uint64(vinsID.(int))
    }
    if serviceID, ok := d.GetOk("service_id"); ok {
        req.ServiceID = uint64(serviceID.(int))
    }
    if k8sID, ok := d.GetOk("k8s_id"); ok {
        req.K8SID = uint64(k8sID.(int))
    }
    if flipgroupID, ok := d.GetOk("flipgroup_id"); ok {
        req.FLIPGroupID = uint64(flipgroupID.(int))
    }
    if lbID, ok := d.GetOk("lb_id"); ok {
        req.LBID = uint64(lbID.(int))
    }
    if sepID, ok := d.GetOk("sep_id"); ok {
        req.SEPID = uint64(sepID.(int))
    }
    if excludeAuditLines, ok := d.GetOk("exclude_audit_lines"); ok {
        req.ExcludeAuditLines = excludeAuditLines.(bool)
    }

    log.Debugf("utilityAuditListCheckPresence: load audit list")
    auditList, err := c.CloudAPI().Audit().List(ctx, req)
    if err != nil {
        return nil, err
    }

    return auditList, nil
}
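The commit adds the audit list data source and its request-building helper, but the provider-level registration is not part of the visible diff. A hedged sketch of how such a data source is typically wired in, assuming the provider exposes a DataSourcesMap and using the hypothetical name decort_audit_list (both assumptions, not shown in this commit):

    // Assumed wiring, not visible in this diff — the map and the name are hypothetical.
    DataSourcesMap: map[string]*schema.Resource{
        "decort_audit_list": audit.DataSourceAuditList(),
        // ... existing data sources ...
    },
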
@@ -147,6 +147,11 @@ func dataSourceBasicServiceListSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "Page size",
        },
        "zone_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "Zone ID",
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,

@@ -51,14 +51,15 @@ func resourceBasicServiceGroupCreate(ctx context.Context, d *schema.ResourceData

    c := m.(*controller.ControllerCfg)
    req := bservice.GroupAddRequest{
        ServiceID: uint64(d.Get("service_id").(int)),
        Name: d.Get("compgroup_name").(string),
        Count: uint64(d.Get("comp_count").(int)),
        CPU: uint64(d.Get("cpu").(int)),
        RAM: uint64(d.Get("ram").(int)),
        Disk: uint64(d.Get("disk").(int)),
        ImageID: uint64(d.Get("image_id").(int)),
        Driver: d.Get("driver").(string),
        StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
    }

    if role, ok := d.GetOk("role"); ok {
@@ -396,10 +397,10 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
            Required: true,
            Description: "OS image ID to create computes from",
        },
        "driver": {
            Type: schema.TypeString,
            Required: true,
            Description: "compute driver like a KVM_X86, etc.",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Required: true,
            Description: "storage policy id of compute. The rules of the specified storage policy will be used.",
        },
        ///4.4.0
        "sep_id": {
@@ -425,6 +426,12 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
            ValidateFunc: validation.StringInSlice([]string{"i440fx", "Q35"}, false),
            Default: "i440fx",
        },
        "driver": {
            Type: schema.TypeString,
            Optional: true,
            Description: "compute driver like a KVM_X86, etc.",
            Default: "KVM_X86",
        },
        ///
        "role": {
            Type: schema.TypeString,

@@ -85,6 +85,9 @@ func utilityBasicServiceListCheckPresence(ctx context.Context, d *schema.Resourc
    if acc_name, ok := d.GetOk("account_name"); ok {
        req.AccountName = acc_name.(string)
    }
    if zoneID, ok := d.GetOk("zone_id"); ok {
        req.ZoneID = uint64(zoneID.(int))
    }

    log.Debugf("utilityBasicServiceListCheckPresence")
    basicServiceList, err := c.CloudAPI().BService().List(ctx, req)

@@ -439,6 +439,11 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
            Computed: true,
            Description: "Disk status",
        },
        "storage_policy_id": {
            Type: schema.TypeFloat,
            Computed: true,
            Description: "Storage policy ID",
        },
        "tech_status": {
            Type: schema.TypeString,
            Computed: true,
@@ -462,6 +467,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeInt,
            Computed: true,
        },
        "to_clean": {
            Type: schema.TypeBool,
            Computed: true,
        },
    }

    return rets

@@ -123,6 +123,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "Page size",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "storage policy ID",
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,
@@ -508,6 +513,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
                    Computed: true,
                    Description: "Disk status",
                },
                "storage_policy_id": {
                    Type: schema.TypeFloat,
                    Computed: true,
                    Description: "Storage policy ID",
                },
                "tech_status": {
                    Type: schema.TypeString,
                    Computed: true,
@@ -531,6 +541,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
                    Type: schema.TypeInt,
                    Computed: true,
                },
                "to_clean": {
                    Type: schema.TypeBool,
                    Computed: true,
                },
            },
        },
    },

@@ -128,6 +128,11 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "Page size",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "storage policy ID",
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,

@@ -494,6 +494,11 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
                    Computed: true,
                    Description: "Disk status",
                },
                "storage_policy_id": {
                    Type: schema.TypeFloat,
                    Computed: true,
                    Description: "Storage policy ID",
                },
                "tech_status": {
                    Type: schema.TypeString,
                    Computed: true,
@@ -517,6 +522,10 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
                    Type: schema.TypeInt,
                    Computed: true,
                },
                "to_clean": {
                    Type: schema.TypeBool,
                    Computed: true,
                },
            },
        },
    },

@@ -120,11 +120,13 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
    d.Set("shareable", disk.Shareable)
    d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
    d.Set("status", disk.Status)
    d.Set("storage_policy_id", disk.StoragePolicyID)
    d.Set("tech_status", disk.TechStatus)
    d.Set("type", disk.Type)
    d.Set("vmid", disk.VMID)
    d.Set("updated_by", disk.UpdatedBy)
    d.Set("updated_time", disk.UpdatedTime)
    d.Set("to_clean", disk.ToClean)
}

func flattenDiskReplication(rep disks.ItemReplication) []map[string]interface{} {
@@ -214,50 +216,52 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
    for _, disk := range dl.Data {
        diskAcl, _ := json.Marshal(disk.ACL)
        temp := map[string]interface{}{
            "account_id": disk.AccountID,
            "account_name": disk.AccountName,
            "acl": string(diskAcl),
            "computes": flattenDiskComputes(disk.Computes),
            "created_by": disk.CreatedBy,
            "created_time": disk.CreatedTime,
            "deleted_by": disk.DeletedBy,
            "deleted_time": disk.DeletedTime,
            "desc": disk.Description,
            "destruction_time": disk.DestructionTime,
            "devicename": disk.DeviceName,
            "gid": disk.GID,
            "disk_id": disk.ID,
            "image_id": disk.ImageID,
            "images": disk.Images,
            "iotune": flattenIOTune(disk.IOTune),
            "machine_id": disk.MachineID,
            "machine_name": disk.MachineName,
            "milestones": disk.Milestones,
            "disk_name": disk.Name,
            "order": disk.Order,
            "params": disk.Params,
            "parent_id": disk.ParentID,
            "pci_slot": disk.PCISlot,
            "pool": disk.Pool,
            "present_to": disk.PresentTo,
            "purge_time": disk.PurgeTime,
            "replication": flattenDiskReplication(disk.Replication),
            "res_id": disk.ResID,
            "res_name": disk.ResName,
            "role": disk.Role,
            "sep_id": disk.SepID,
            "sep_type": disk.SepType,
            "shareable": disk.Shareable,
            "size_available": disk.SizeAvailable,
            "size_max": disk.SizeMax,
            "size_used": disk.SizeUsed,
            "snapshots": flattenDiskSnapshotList(disk.Snapshots),
            "status": disk.Status,
            "storage_policy_id": disk.StoragePolicyID,
            "tech_status": disk.TechStatus,
            "type": disk.Type,
            "vmid": disk.VMID,
            "updated_by": disk.UpdatedBy,
            "updated_time": disk.UpdatedTime,
            "to_clean": disk.ToClean,
        }
        res = append(res, temp)
    }

@@ -62,6 +62,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface

    req.AccountID = uint64(d.Get("account_id").(int))
    req.Name = d.Get("disk_name").(string)
    req.StoragePolicyID = uint64(d.Get("storage_policy_id").(int))
    req.Size = uint64(d.Get("size_max").(int))

    if sepId, ok := d.GetOk("sep_id"); ok {
@@ -235,6 +236,18 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
        }
    }

    if d.HasChange("storage_policy_id") {
        req := disks.ChangeDiskStoragePolicyRequest{
            DiskID: disk.ID,
            StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
        }

        _, err := c.CloudAPI().Disks().ChangeDiskStoragePolicy(ctx, req)
        if err != nil {
            return diag.FromErr(err)
        }
    }

    if d.HasChange("size_max") {
        oldSize, newSize := d.GetChange("size_max")
        if oldSize.(int) < newSize.(int) {
@@ -353,6 +366,11 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
            //ForceNew: true,
            Description: "The unique ID of the subscriber-owner of the disk",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Required: true,
            Description: "ID storage policy under which the disk will be created",
        },
        "created_by": {
            Type: schema.TypeString,
            Computed: true,
@@ -776,6 +794,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeInt,
            Computed: true,
        },
        "to_clean": {
            Type: schema.TypeBool,
            Computed: true,
        },
    }

    return rets

@@ -86,6 +86,9 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m
    if size, ok := d.GetOk("size"); ok {
        req.Size = uint64(size.(int))
    }
    if storagePolicyID, ok := d.GetOk("storage_policy_id"); ok {
        req.StoragePolicyID = uint64(storagePolicyID.(int))
    }

    log.Debugf("utilityDiskListCheckPresence: load disk list")
    diskList, err := c.CloudAPI().Disks().List(ctx, req)

@@ -46,6 +46,9 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
    if size, ok := d.GetOk("size"); ok {
        req.Size = uint64(size.(int))
    }
    if storagePolicyID, ok := d.GetOk("storage_policy_id"); ok {
        req.StoragePolicyID = uint64(storagePolicyID.(int))
    }

    log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
    unattachedList, err := c.CloudAPI().Disks().ListUnattached(ctx, req)

@@ -112,6 +112,11 @@ func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "Name of the openVswitch bridge",
        },
        "zone_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "Zone ID",
        },
        "items": {
            Type: schema.TypeList,
            Computed: true,

@@ -76,10 +76,12 @@ func utilityExtnetListCheckPresence(ctx context.Context, d *schema.ResourceData,
    if size, ok := d.GetOk("size"); ok {
        req.Size = uint64(size.(int))
    }

    if ovsBridge, ok := d.GetOk("ovs_bridge"); ok {
        req.OVSBridge = ovsBridge.(string)
    }
    if zoneID, ok := d.GetOk("zone_id"); ok {
        req.ZoneID = uint64(zoneID.(int))
    }

    log.Debugf("utilityExtnetListCheckPresence")
    extnetList, err := c.CloudAPI().ExtNet().List(ctx, req)

@@ -127,6 +127,10 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
            Optional: true,
            Description: "page number",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Optional: true,
        },
        "size": {
            Type: schema.TypeInt,
            Optional: true,

@@ -60,7 +60,9 @@ func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
    d.Set("size", img.Size)
    d.Set("snapshot_id", img.SnapshotID)
    d.Set("status", img.Status)
    d.Set("storage_policy_id", img.StoragePolicyID)
    d.Set("tech_status", img.TechStatus)
    d.Set("to_clean", img.ToClean)
    d.Set("type", img.Type)
    d.Set("username", img.Username)
    d.Set("version", img.Version)
@@ -86,6 +88,7 @@ func flattenImageList(il *image.ListImages) []map[string]interface{} {
            "sep_id": img.SepID,
            "size": img.Size,
            "status": img.Status,
            "storage_policy_id": img.StoragePolicyID,
            "type": img.Type,
            "username": img.Username,
            "virtual": img.Virtual,

@@ -206,10 +206,18 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeString,
            Computed: true,
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Computed: true,
        },
        "tech_status": {
            Type: schema.TypeString,
            Computed: true,
        },
        "to_clean": {
            Type: schema.TypeBool,
            Computed: true,
        },
        "type": {
            Type: schema.TypeString,
            Computed: true,

@@ -1,137 +1,141 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package image

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

func dataSourceImageSchemaMake() map[string]*schema.Schema {
    return map[string]*schema.Schema{
        "account_id": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "Owner account id",
        },
        "architecture": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image architecture",
        },
        "boot_type": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Boot image type",
        },
        "bootable": {
            Type: schema.TypeBool,
            Computed: true,
            Description: "Flag, true if image is bootable, otherwise - false",
        },
        "cdrom": {
            Type: schema.TypeBool,
            Computed: true,
            Description: "Flag, true if image is cdrom image, otherwise - false",
        },
        "desc": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image description",
        },
        "drivers": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeString,
            },
            Description: "Image drivers",
        },
        "hot_resize": {
            Type: schema.TypeBool,
            Computed: true,
            Description: "Flag, true if image supports hot resize, else if not",
        },
        "image_id": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "Image id",
        },
        "link_to": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "For virtual images, id image, which current image linked",
        },
        "image_name": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image name",
        },
        "network_interface_naming": {
            Type: schema.TypeString,
            Computed: true,
        },
        "pool_name": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image pool",
        },
        "sep_id": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "Image storage endpoint id",
        },
        "size": {
            Type: schema.TypeInt,
            Computed: true,
            Description: "Image size",
        },
        "status": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image status",
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Computed: true,
        },
        "type": {
            Type: schema.TypeString,
            Computed: true,
            Description: "Image type",
        },
        "username": {
            Type: schema.TypeString,
            Computed: true,
            Description: "username",
        },
        "virtual": {
            Type: schema.TypeBool,
            Computed: true,
            Description: "True if image is virtula, otherwise - else",
        },
    }
}

@@ -35,7 +35,6 @@ package image
import (
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
)

func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.Schema {
@@ -73,6 +72,12 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
        Description: "Image type linux, windows or unknown",
    }

    sch["storage_policy_id"] = &schema.Schema{
        Type: schema.TypeInt,
        Required: true,
        Description: "ID of the storage policy",
    }

    sch["hot_resize"] = &schema.Schema{
        Type: schema.TypeBool,
        Optional: true,
@@ -126,16 +131,6 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
        Description: "storage endpoint provider ID",
    }

    sch["drivers"] = &schema.Schema{
        Type: schema.TypeList,
        Required: true,
        StateFunc: statefuncs.StateFuncToUpper,
        Elem: &schema.Schema{
            Type: schema.TypeString,
            ValidateFunc: validation.StringInSlice([]string{"KVM_X86"}, false),
        },
    }

    sch["network_interface_naming"] = &schema.Schema{
        Type: schema.TypeString,
        Optional: true,

@@ -50,6 +50,13 @@ func resourceImageVirtualSchemaMake(sch map[string]*schema.Schema) map[string]*s
        Description: "ID of real image to link this virtual image to upon creation",
    }

    sch["account_id"] = &schema.Schema{
        Type: schema.TypeInt,
        Optional: true,
        Computed: true,
        Description: "account_id",
    }

    sch["image_id"] = &schema.Schema{
        Type: schema.TypeInt,
        Computed: true,

@@ -63,20 +63,14 @@ func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interfac

    c := m.(*controller.ControllerCfg)
    req := image.CreateRequest{
        AccountID: uint64(d.Get("account_id").(int)),
        Name: d.Get("name").(string),
        URL: d.Get("url").(string),
        BootType: d.Get("boot_type").(string),
        ImageType: d.Get("type").(string),
        StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
    }

    drivers := []string{}
    for _, driver := range d.Get("drivers").([]interface{}) {
        drivers = append(drivers, driver.(string))
    }

    req.Drivers = drivers

    if hotresize, ok := d.GetOk("hot_resize"); ok {
        req.HotResize = hotresize.(bool)
    }

@@ -490,6 +490,10 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeString,
            Computed: true,
        },
        "to_clean": {
            Type: schema.TypeBool,
            Computed: true,
        },
        "version": {
            Type: schema.TypeString,
            Computed: true,

@@ -48,7 +48,6 @@ import (
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

@@ -95,11 +94,6 @@ func resourceImageFromPlatformDiskCreate(ctx context.Context, d *schema.Resource
    if poolName, ok := d.GetOk("pool_name"); ok {
        req.PoolName = poolName.(string)
    }
    if driversInterface, ok := d.GetOk("drivers"); ok {
        for _, d := range driversInterface.([]interface{}) {
            req.Drivers = append(req.Drivers, d.(string))
        }
    }
    if hotresize, ok := d.GetOk("hot_resize"); ok {
        req.HotResize = hotresize.(bool)
    }
@@ -320,16 +314,6 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
            Computed: true,
            Description: "pool for image create",
        },
        "drivers": {
            Type: schema.TypeList,
            Required: true,
            Elem: &schema.Schema{
                StateFunc: statefuncs.StateFuncToUpper,
                ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
                Type: schema.TypeString,
            },
            Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
        },
        "bootable": {
            Type: schema.TypeBool,
            Optional: true,
@@ -385,6 +369,13 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeString,
            Computed: true,
        },
        "drivers": {
            Type: schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeString,
            },
        },
        "enabled": {
            Type: schema.TypeBool,
            Computed: true,
@@ -483,6 +474,10 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
            Type: schema.TypeString,
            Computed: true,
        },
        "to_clean": {
            Type: schema.TypeBool,
            Computed: true,
        },
        "version": {
            Type: schema.TypeString,
            Computed: true,

@@ -49,8 +49,9 @@ func resourceImageVirtualCreate(ctx context.Context, d *schema.ResourceData, m i

    c := m.(*controller.ControllerCfg)
    req := image.CreateVirtualRequest{
        Name: d.Get("name").(string),
        TargetID: uint64(d.Get("link_to").(int)),
        AccountID: uint64(d.Get("account_id").(int)),
    }

    imageId, err := c.CloudAPI().Image().CreateVirtual(ctx, req)

@@ -103,6 +103,9 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
    if enabled, ok := d.GetOkExists("enabled"); ok {
        req.Enabled = enabled.(bool)
    }
    if storagePolicyID, ok := d.GetOk("storage_policy_id"); ok {
        req.StoragePolicyID = uint64(storagePolicyID.(int))
    }

    log.Debugf("utilityImageListCheckPresence: load image list")
    imageList, err := c.CloudAPI().Image().List(ctx, req)

@@ -192,6 +192,11 @@ func createK8sListSchema() map[string]*schema.Schema {
            Type: schema.TypeInt,
            Optional: true,
        },
        "zone_id": {
            Type: schema.TypeInt,
            Optional: true,
            Description: "Zone ID",
        },

        "items": {
            Type: schema.TypeList,

@@ -92,6 +92,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
    createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
    createReq.WorkerGroupName = d.Get("wg_name").(string)
    createReq.NetworkPlugin = d.Get("network_plugin").(string)
    createReq.StoragePolicyID = uint64(d.Get("storage_policy_id").(int))

    var masterNode K8sNodeRecord
    if masters, ok := d.GetOk("masters"); ok {
@@ -665,6 +666,11 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
            Description: "Network plugin to be used",
            ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
        },
        "storage_policy_id": {
            Type: schema.TypeInt,
            Required: true,
            Description: "ID of the storage policy",
        },
        "zone_id": {
            Type: schema.TypeInt,
            Optional: true,

@@ -95,6 +95,7 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
    createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
    createReq.WorkerGroupName = "temp"
    createReq.NetworkPlugin = d.Get("network_plugin").(string)
    createReq.StoragePolicyID = uint64(d.Get("storage_policy_id").(int))

    if zoneID, ok := d.GetOk("zone_id"); ok {
        createReq.ZoneID = uint64(zoneID.(int))
@@ -680,6 +681,11 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
|
||||
Description: "Network plugin to be used",
|
||||
ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
|
||||
},
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the storage policy",
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
|
||||
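The second argument of validation.StringInSlice controls case sensitivity: true (as for network_plugin above) accepts any casing of the listed values, false requires an exact match. A standalone sketch, not taken from the provider:

// Sketch: case-insensitive vs case-sensitive StringInSlice validation.
package sketch

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func networkPluginSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		// true: "calico", "Calico" and "CALICO" all pass validation.
		ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
	}
}

func driverSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		// false: only the exact string "KVM_X86" is accepted.
		ValidateFunc: validation.StringInSlice([]string{"KVM_X86"}, false),
	}
}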
@@ -65,14 +65,15 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac

c := m.(*controller.ControllerCfg)
req := k8s.WorkersGroupAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
}

labels, _ := d.Get("labels").([]interface{})
@@ -276,6 +277,12 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Description: "Name of the worker group.",
},

"storage_policy_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the storage policy",
},

"num": {
Type: schema.TypeInt,
Optional: true,

@@ -55,12 +55,6 @@ func utilityK8CIListCheckPresence(ctx context.Context, d *schema.ResourceData, m
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if worker_driver, ok := d.GetOk("worker_driver"); ok {
req.WorkerDriver = worker_driver.(string)
}
if master_driver, ok := d.GetOk("master_driver"); ok {
req.MasterDriver = master_driver.(string)
}
if network_plugin, ok := d.GetOk("network_plugin"); ok {
req.NetworkPlugins = network_plugin.(string)
}

@@ -278,6 +278,9 @@ func utilityK8sListCheckPresence(ctx context.Context, d *schema.ResourceData, m
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if zoneID, ok := d.GetOk("zone_id"); ok {
req.ZoneID = uint64(zoneID.(int))
}

k8sList, err := c.CloudAPI().K8S().List(ctx, req)
if err != nil {

@@ -441,6 +441,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -449,6 +453,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"to_clean": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -495,6 +503,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"enable_secgroups": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"flip_group_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -561,6 +573,13 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"security_groups": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"target": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -726,6 +745,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"boot_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"cd_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -1125,6 +1148,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"os_version": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenComputeAudits(computeAudits))
|
||||
d.Set("entry_count", computeAudits.EntryCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -59,6 +60,42 @@ func dataSourceComputeAuditsSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"timestamp_at": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"timestamp_to": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"user": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"call": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"sort_by": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"min_status_code": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"max_status_code": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
@@ -88,6 +125,10 @@ func dataSourceComputeAuditsSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -136,6 +136,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"boot_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"cd_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -273,6 +277,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"os_version": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"pinned": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
@@ -514,6 +522,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
|
||||
Default: false,
|
||||
Description: "If set to true, ignores any VMs associated with any k8s cluster",
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Zone ID",
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
|
||||
@@ -75,6 +75,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
|
||||
"conn_type": interfaceItem.ConnType,
|
||||
"def_gw": interfaceItem.DefGW,
|
||||
"enabled": interfaceItem.Enabled,
|
||||
"enable_secgroups": interfaceItem.EnableSecGroups,
|
||||
"flip_group_id": interfaceItem.FLIPGroupID,
|
||||
"guid": interfaceItem.GUID,
|
||||
"ip_address": interfaceItem.IPAddress,
|
||||
@@ -88,6 +89,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
|
||||
"node_id": interfaceItem.NodeID,
|
||||
"pci_slot": interfaceItem.PCISlot,
|
||||
"qos": flattenQOS(interfaceItem.QOS),
|
||||
"security_groups": interfaceItem.SecGroups,
|
||||
"sdn_interface_id": interfaceItem.SDNInterfaceID,
|
||||
"target": interfaceItem.Target,
|
||||
"type": interfaceItem.Type,
|
||||
@@ -177,38 +179,40 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
|
||||
customFields, _ := json.Marshal(compute.CustomFields)
|
||||
devices, _ := json.Marshal(compute.Devices)
|
||||
temp := map[string]interface{}{
|
||||
"acl": flattenListACL(compute.ACL),
|
||||
"account_id": compute.AccountID,
|
||||
"account_name": compute.AccountName,
|
||||
"affinity_label": compute.AffinityLabel,
|
||||
"affinity_rules": flattenListRules(compute.AffinityRules),
|
||||
"affinity_weight": compute.AffinityWeight,
|
||||
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
|
||||
"arch": compute.Architecture,
|
||||
"auto_start_w_node": compute.AutoStart,
|
||||
"boot_order": compute.BootOrder,
|
||||
"bootdisk_size": compute.BootDiskSize,
|
||||
"chipset": compute.Chipset,
|
||||
"cd_image_id": compute.CdImageId,
|
||||
"clone_reference": compute.CloneReference,
|
||||
"clones": compute.Clones,
|
||||
"computeci_id": compute.ComputeCIID,
|
||||
"cpu_pin": compute.CPUPin,
|
||||
"cpus": compute.CPU,
|
||||
"created_by": compute.CreatedBy,
|
||||
"created_time": compute.CreatedTime,
|
||||
"custom_fields": string(customFields),
|
||||
"deleted_by": compute.DeletedBy,
|
||||
"deleted_time": compute.DeletedTime,
|
||||
"desc": compute.Description,
|
||||
"devices": string(devices),
|
||||
"disks": flattenDisks(compute.Disks),
|
||||
"driver": compute.Driver,
|
||||
"gid": compute.GID,
|
||||
"guid": compute.GUID,
|
||||
"hp_backed": compute.HPBacked,
|
||||
"compute_id": compute.ID,
|
||||
"image_id": compute.ImageID,
|
||||
"acl": flattenListACL(compute.ACL),
|
||||
"account_id": compute.AccountID,
|
||||
"account_name": compute.AccountName,
|
||||
"affinity_label": compute.AffinityLabel,
|
||||
"affinity_rules": flattenListRules(compute.AffinityRules),
|
||||
"affinity_weight": compute.AffinityWeight,
|
||||
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
|
||||
"arch": compute.Architecture,
|
||||
"auto_start_w_node": compute.AutoStart,
|
||||
"boot_order": compute.BootOrder,
|
||||
"bootdisk_size": compute.BootDiskSize,
|
||||
"boot_image_id": compute.BootImageID,
|
||||
"chipset": compute.Chipset,
|
||||
"cd_image_id": compute.CdImageId,
|
||||
"clone_reference": compute.CloneReference,
|
||||
"clones": compute.Clones,
|
||||
"computeci_id": compute.ComputeCIID,
|
||||
"cpu_pin": compute.CPUPin,
|
||||
"cpus": compute.CPU,
|
||||
"created_by": compute.CreatedBy,
|
||||
"created_time": compute.CreatedTime,
|
||||
"custom_fields": string(customFields),
|
||||
"deleted_by": compute.DeletedBy,
|
||||
"deleted_time": compute.DeletedTime,
|
||||
"desc": compute.Description,
|
||||
"devices": string(devices),
|
||||
"disks": flattenDisks(compute.Disks),
|
||||
"driver": compute.Driver,
|
||||
"gid": compute.GID,
|
||||
"guid": compute.GUID,
|
||||
"hp_backed": compute.HPBacked,
|
||||
"compute_id": compute.ID,
|
||||
//TODO
|
||||
// "image_id": compute.ImageID,
|
||||
"interfaces": flattenInterfaces(compute.Interfaces),
|
||||
"live_migration_job_id": compute.LiveMigrationJobID,
|
||||
"lock_status": compute.LockStatus,
|
||||
@@ -220,33 +224,36 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
|
||||
"need_reboot": compute.NeedReboot,
|
||||
"numa_affinity": compute.NumaAffinity,
|
||||
"numa_node_id": compute.NumaNodeId,
|
||||
"os_version": compute.OSVersion,
|
||||
// "pinned": compute.Pinned,
|
||||
"preferred_cpu": compute.PreferredCPU,
|
||||
"qemu_guest": flattenQemuQuest(compute.QemuQuest),
|
||||
"ram": compute.RAM,
|
||||
"reference_id": compute.ReferenceID,
|
||||
"registered": compute.Registered,
|
||||
"res_name": compute.ResName,
|
||||
"reserved_node_cpus": compute.ReservedNodeCpus,
|
||||
"rg_id": compute.RGID,
|
||||
"rg_name": compute.RGName,
|
||||
"snap_sets": flattenSnapSets(compute.SnapSets),
|
||||
"stateless_sep_id": compute.StatelessSepID,
|
||||
"stateless_sep_type": compute.StatelessSepType,
|
||||
"status": compute.Status,
|
||||
"tags": flattenTags(compute.Tags),
|
||||
"tech_status": compute.TechStatus,
|
||||
"total_disk_size": compute.TotalDiskSize,
|
||||
"updated_by": compute.UpdatedBy,
|
||||
"updated_time": compute.UpdatedTime,
|
||||
"user_managed": compute.UserManaged,
|
||||
"vgpus": compute.VGPUs,
|
||||
"vins_connected": compute.VINSConnected,
|
||||
"virtual_image_id": compute.VirtualImageID,
|
||||
"preferred_cpu": compute.PreferredCPU,
|
||||
"qemu_guest": flattenQemuQuest(compute.QemuQuest),
|
||||
"ram": compute.RAM,
|
||||
"reference_id": compute.ReferenceID,
|
||||
"registered": compute.Registered,
|
||||
"res_name": compute.ResName,
|
||||
"reserved_node_cpus": compute.ReservedNodeCpus,
|
||||
"rg_id": compute.RGID,
|
||||
"rg_name": compute.RGName,
|
||||
"snap_sets": flattenSnapSets(compute.SnapSets),
|
||||
"stateless_sep_id": compute.StatelessSepID,
|
||||
"stateless_sep_type": compute.StatelessSepType,
|
||||
"status": compute.Status,
|
||||
"tags": flattenTags(compute.Tags),
|
||||
"tech_status": compute.TechStatus,
|
||||
"total_disk_size": compute.TotalDiskSize,
|
||||
"updated_by": compute.UpdatedBy,
|
||||
"updated_time": compute.UpdatedTime,
|
||||
"user_managed": compute.UserManaged,
|
||||
"vgpus": compute.VGPUs,
|
||||
"vins_connected": compute.VINSConnected,
|
||||
//TODO
|
||||
// "virtual_image_id": compute.VirtualImageID,
|
||||
"loader_type": compute.LoaderType,
|
||||
"boot_type": compute.BootType,
|
||||
"hot_resize": compute.HotResize,
|
||||
"network_interface_naming": compute.NetworkInterfaceNaming,
|
||||
"zone_id": compute.ZoneID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
@@ -258,18 +265,20 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
|
||||
res := make([]map[string]interface{}, 0)
|
||||
|
||||
temp := map[string]interface{}{
|
||||
"disk_name": bootDisk.Name,
|
||||
"disk_id": bootDisk.ID,
|
||||
"disk_type": bootDisk.Type,
|
||||
"sep_id": bootDisk.SepID,
|
||||
"shareable": bootDisk.Shareable,
|
||||
"size_max": bootDisk.SizeMax,
|
||||
"size_used": bootDisk.SizeUsed,
|
||||
"pool": bootDisk.Pool,
|
||||
"desc": bootDisk.Description,
|
||||
"image_id": bootDisk.ImageID,
|
||||
"size": bootDisk.SizeMax,
|
||||
"present_to": bootDisk.PresentTo,
|
||||
"disk_name": bootDisk.Name,
|
||||
"disk_id": bootDisk.ID,
|
||||
"disk_type": bootDisk.Type,
|
||||
"sep_id": bootDisk.SepID,
|
||||
"shareable": bootDisk.Shareable,
|
||||
"size_max": bootDisk.SizeMax,
|
||||
"size_used": bootDisk.SizeUsed,
|
||||
"pool": bootDisk.Pool,
|
||||
"desc": bootDisk.Description,
|
||||
"image_id": bootDisk.ImageID,
|
||||
"size": bootDisk.SizeMax,
|
||||
"present_to": bootDisk.PresentTo,
|
||||
"storage_policy_id": bootDisk.StoragePolicyID,
|
||||
"to_clean": bootDisk.ToClean,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
@@ -298,19 +307,20 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, disksBlocks, ex
|
||||
pernamentlyValue := disksBlocks[indexDataDisks].(map[string]interface{})["permanently"].(bool)
|
||||
|
||||
temp := map[string]interface{}{
|
||||
"disk_name": disk.Name,
|
||||
"disk_id": disk.ID,
|
||||
"disk_type": disk.Type,
|
||||
"sep_id": disk.SepID,
|
||||
"shareable": disk.Shareable,
|
||||
"size_max": disk.SizeMax,
|
||||
"size_used": disk.SizeUsed,
|
||||
"pool": disk.Pool,
|
||||
"desc": disk.Description,
|
||||
"image_id": disk.ImageID,
|
||||
"size": disk.SizeMax,
|
||||
"permanently": pernamentlyValue,
|
||||
"present_to": disk.PresentTo,
|
||||
"disk_name": disk.Name,
|
||||
"disk_id": disk.ID,
|
||||
"disk_type": disk.Type,
|
||||
"sep_id": disk.SepID,
|
||||
"shareable": disk.Shareable,
|
||||
"size_max": disk.SizeMax,
|
||||
"size_used": disk.SizeUsed,
|
||||
"pool": disk.Pool,
|
||||
"desc": disk.Description,
|
||||
"image_id": disk.ImageID,
|
||||
"size": disk.SizeMax,
|
||||
"permanently": pernamentlyValue,
|
||||
"present_to": disk.PresentTo,
|
||||
"storage_policy_id": disk.StoragePolicyID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
indexDataDisks++
|
||||
@@ -331,6 +341,7 @@ func flattenNetwork(networks []interface{}, interfaces compute.ListInterfaces) [
|
||||
"mtu": network.MTU,
|
||||
"sdn_interface_id": network.SDNInterfaceID,
|
||||
"weight": flattenNetworkWeight(networks, network.NetID, network.NetType),
|
||||
"enabled": network.Enabled,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
@@ -381,6 +392,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
|
||||
d.Set("boot_disk_size", bootDisk.SizeMax)
|
||||
d.Set("boot_disk", flattenBootDisk(bootDisk))
|
||||
d.Set("boot_disk_id", bootDisk.ID)
|
||||
d.Set("boot_image_id", computeRec.BootImageID)
|
||||
d.Set("cd_image_id", computeRec.CdImageId)
|
||||
d.Set("sep_id", bootDisk.SepID)
|
||||
d.Set("pool", bootDisk.Pool)
|
||||
@@ -404,11 +416,12 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
|
||||
d.Set("gid", computeRec.GID)
|
||||
d.Set("guid", computeRec.GUID)
|
||||
d.Set("compute_id", computeRec.ID)
|
||||
if computeRec.VirtualImageID != 0 {
|
||||
d.Set("image_id", computeRec.VirtualImageID)
|
||||
} else {
|
||||
d.Set("image_id", computeRec.ImageID)
|
||||
}
|
||||
//TODO
|
||||
// if computeRec.VirtualImageID != 0 {
|
||||
// d.Set("image_id", computeRec.VirtualImageID)
|
||||
// } else {
|
||||
// d.Set("image_id", computeRec.ImageID)
|
||||
// }
|
||||
d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
|
||||
d.Set("lock_status", computeRec.LockStatus)
|
||||
d.Set("manager_id", computeRec.ManagerID)
|
||||
@@ -446,8 +459,9 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
|
||||
d.Set("user_managed", computeRec.UserManaged)
|
||||
d.Set("vnc_password", computeRec.VNCPassword)
|
||||
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
|
||||
d.Set("virtual_image_id", computeRec.VirtualImageID)
|
||||
d.Set("virtual_image_name", computeRec.VirtualImageName)
|
||||
//TODO
|
||||
// d.Set("virtual_image_id", computeRec.VirtualImageID)
|
||||
// d.Set("virtual_image_name", computeRec.VirtualImageName)
|
||||
d.Set("loader_type", computeRec.LoaderType)
|
||||
d.Set("boot_type", computeRec.BootType)
|
||||
d.Set("hot_resize", computeRec.HotResize)
|
||||
@@ -467,6 +481,8 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
|
||||
d.Set("network", flattenNetwork(d.Get("network").(*schema.Set).List(), computeRec.Interfaces))
|
||||
d.Set("pci_devices", flattenPCI(*pciList))
|
||||
|
||||
d.Set("os_version", computeRec.OSVersion)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -581,8 +597,10 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
|
||||
"size_used": disk.SizeUsed,
|
||||
"snapshots": flattenSnapshots(disk.Snapshots),
|
||||
"status": disk.Status,
|
||||
"storage_policy_id": disk.StoragePolicyID,
|
||||
"tech_status": disk.TechStatus,
|
||||
"type": disk.Type,
|
||||
"to_clean": disk.ToClean,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
@@ -638,6 +656,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
|
||||
d.Set("chipset", computeRec.Chipset)
|
||||
d.Set("boot_order", computeRec.BootOrder)
|
||||
d.Set("bootdisk_size", computeRec.BootDiskSize)
|
||||
d.Set("boot_image_id", computeRec.BootImageID)
|
||||
d.Set("cd_image_id", computeRec.CdImageId)
|
||||
d.Set("clone_reference", computeRec.CloneReference)
|
||||
d.Set("clones", computeRec.Clones)
|
||||
@@ -698,14 +717,16 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
|
||||
d.Set("userdata", string(userdata))
|
||||
d.Set("vnc_password", computeRec.VNCPassword)
|
||||
d.Set("vgpus", flattenVGPUs(computeRec.VGPUs))
|
||||
d.Set("virtual_image_id", computeRec.VirtualImageID)
|
||||
d.Set("virtual_image_name", computeRec.VirtualImageName)
|
||||
//TODO
|
||||
// d.Set("virtual_image_id", computeRec.VirtualImageID)
|
||||
// d.Set("virtual_image_name", computeRec.VirtualImageName)
|
||||
d.Set("pci_devices", flattenPCI(*pciList))
|
||||
d.Set("loader_type", computeRec.LoaderType)
|
||||
d.Set("boot_type", computeRec.BootType)
|
||||
d.Set("hot_resize", computeRec.HotResize)
|
||||
d.Set("network_interface_naming", computeRec.NetworkInterfaceNaming)
|
||||
d.Set("zone_id", computeRec.ZoneID)
|
||||
d.Set("os_version", computeRec.OSVersion)
|
||||
}
|
||||
|
||||
func flattenPCI(pciList compute.ListPCIDevices) []uint64 {
|
||||
@@ -719,8 +740,8 @@ func flattenPCI(pciList compute.ListPCIDevices) []uint64 {
|
||||
}
|
||||
|
||||
func flattenComputeAudits(computeAudits compute.ListAudits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(computeAudits))
|
||||
for _, computeAudit := range computeAudits {
|
||||
res := make([]map[string]interface{}, 0, len(computeAudits.Data))
|
||||
for _, computeAudit := range computeAudits.Data {
|
||||
temp := map[string]interface{}{
|
||||
"call": computeAudit.Call,
|
||||
"responsetime": computeAudit.ResponseTime,
|
||||
|
||||
10
internal/service/cloudapi/kvmvm/models.go
Normal file
@@ -0,0 +1,10 @@
package kvmvm

type updatedNetwork struct {
DetachMap []map[string]interface{}
ChangeIPMap []map[string]interface{}
ChangeMacMap []map[string]interface{}
ChangeMTUMap []map[string]interface{}
AttachMap []map[string]interface{}
EnableMap []map[string]interface{}
}
@@ -177,6 +177,13 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "unique_identifier of LogicalPort on SDN side",
},

"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
}
return rets
}

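The new models.go bundles the per-action slices produced when diffing networks into one updatedNetwork value, so adding a category such as EnableMap does not change the signature of differenceNetwork or its callers. A hedged, standalone sketch of the same idea with stand-in types:

// Sketch: returning one struct instead of several parallel slices.
package sketch

type networkDiff struct {
	Attach []map[string]interface{}
	Detach []map[string]interface{}
}

// diffNetworks compares old and new entries by net_id; callers read the result
// by field name, and a new category can be added without touching call sites.
func diffNetworks(oldList, newList []map[string]interface{}) *networkDiff {
	d := &networkDiff{}
	oldIDs := make(map[int]bool)
	newIDs := make(map[int]bool)
	for _, o := range oldList {
		oldIDs[o["net_id"].(int)] = true
	}
	for _, n := range newList {
		newIDs[n["net_id"].(int)] = true
		if !oldIDs[n["net_id"].(int)] {
			d.Attach = append(d.Attach, n)
		}
	}
	for _, o := range oldList {
		if !newIDs[o["net_id"].(int)] {
			d.Detach = append(d.Detach, o)
		}
	}
	return d
}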
@@ -35,6 +35,8 @@ package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -172,6 +174,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
NetID: uint64(netInterfaceVal["net_id"].(int)),
|
||||
}
|
||||
|
||||
if enabledNetwork(d.GetRawConfig().GetAttr("network"), reqInterface.NetID, reqInterface.NetType) {
|
||||
reqInterface.Enabled = netInterfaceVal["enabled"].(bool)
|
||||
}
|
||||
|
||||
if reqInterface.NetType == "DPDK" || reqInterface.NetType == "EXTNET" {
|
||||
reqInterface.MTU = uint64(netInterfaceVal["mtu"].(int))
|
||||
}
|
||||
@@ -204,8 +210,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
for _, elem := range disks.([]interface{}) {
|
||||
diskVal := elem.(map[string]interface{})
|
||||
reqDataDisk := kvmx86.DataDisk{
|
||||
DiskName: diskVal["disk_name"].(string),
|
||||
Size: uint64(diskVal["size"].(int)),
|
||||
DiskName: diskVal["disk_name"].(string),
|
||||
Size: uint64(diskVal["size"].(int)),
|
||||
StoragePolicyID: uint64(diskVal["storage_policy_id"].(int)),
|
||||
}
|
||||
if sepId, ok := diskVal["sep_id"]; ok {
|
||||
reqDataDisk.SepID = uint64(sepId.(int))
|
||||
@@ -235,14 +242,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
|
||||
var computeId uint64
|
||||
driver := d.Get("driver").(string)
|
||||
|
||||
createReqX86.RGID = uint64(d.Get("rg_id").(int))
|
||||
createReqX86.Name = d.Get("name").(string)
|
||||
createReqX86.CPU = uint64(d.Get("cpu").(int))
|
||||
createReqX86.RAM = uint64(d.Get("ram").(int))
|
||||
|
||||
createReqX86.Driver = driver
|
||||
createReqX86.StoragePolicyID = uint64(d.Get("storage_policy_id").(int))
|
||||
|
||||
if image, ok := d.GetOk("image_id"); ok {
|
||||
createReqX86.ImageID = uint64(image.(int))
|
||||
@@ -278,6 +283,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
|
||||
if osVersion, ok := d.GetOk("os_version"); ok {
|
||||
createReqX86.OSVersion = osVersion.(string)
|
||||
}
|
||||
|
||||
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
|
||||
apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
|
||||
if err != nil {
|
||||
@@ -289,6 +298,11 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
|
||||
warnings := dc.Warnings{}
|
||||
|
||||
simpleCompRec, err := utilityComputeCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
warnings.Add(err)
|
||||
}
|
||||
|
||||
cleanup := false
|
||||
defer func() {
|
||||
if cleanup {
|
||||
@@ -312,7 +326,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
|
||||
loaderType, loaderTypeOk := d.GetOk("loader_type")
|
||||
bootType, bootTypeOk := d.GetOk("boot_type")
|
||||
hotResize, hotResizeOk := d.GetOk("hot_resize")
|
||||
hotResize, hotResizeOk := d.GetOkExists("hot_resize")
|
||||
networkInterfaceNaming, networkInterfaceNamingOk := d.GetOk("network_interface_naming")
|
||||
|
||||
if loaderTypeOk {
|
||||
@@ -388,14 +402,42 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
|
||||
if d.Get("pin_to_stack").(bool) {
|
||||
req := compute.PinToStackRequest{
|
||||
ComputeID: computeId,
|
||||
}
|
||||
req.AutoStart = d.Get("auto_start_w_node").(bool)
|
||||
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
|
||||
if err != nil {
|
||||
warnings.Add(err)
|
||||
if secGroups, ok := d.GetOk("security_groups"); ok {
|
||||
if secGroups.(*schema.Set).Len() > 0 {
|
||||
sgl := secGroups.(*schema.Set).List()
|
||||
for _, elem := range sgl {
|
||||
secGroupsMap := elem.(map[string]interface{})
|
||||
|
||||
netType := secGroupsMap["net_type"].(string)
|
||||
netId := uint64(secGroupsMap["net_id"].(int))
|
||||
var mac string
|
||||
for _, iface := range simpleCompRec.Interfaces {
|
||||
if iface.NetID == netId && iface.NetType == netType {
|
||||
mac = iface.MAC
|
||||
break
|
||||
}
|
||||
}
|
||||
if mac == "" {
|
||||
warnings.Add(errors.New(fmt.Sprintf("add security groups: Network with type %s and id %d is not connected to the compute %d", netType, netId, computeId)))
|
||||
continue
|
||||
}
|
||||
secGroupsIDs := make([]uint64, 0)
|
||||
for _, id := range secGroupsMap["security_groups"].(*schema.Set).List() {
|
||||
secGroupsIDs = append(secGroupsIDs, uint64(id.(int)))
|
||||
}
|
||||
log.Debugf("resourceComputeCreate: Configure security groups interface parameters on Network with type %s and id %d", netType, netId)
|
||||
req := compute.ChangeSecGroupsRequest{
|
||||
ComputeID: computeId,
|
||||
Interface: mac,
|
||||
SecGroups: secGroupsIDs,
|
||||
EnableSecGroups: secGroupsMap["enable_secgroups"].(bool),
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().Compute().ChangeSecGroups(ctx, req)
|
||||
if err != nil {
|
||||
warnings.Add(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -418,6 +460,22 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
|
||||
if d.Get("pin_to_stack").(bool) {
|
||||
if !d.Get("started").(bool) {
|
||||
warnings.Add(errors.New("cannot pin to stack a VM, VM should be started"))
|
||||
}
|
||||
if d.Get("started").(bool) {
|
||||
req := compute.PinToStackRequest{
|
||||
ComputeID: computeId,
|
||||
}
|
||||
req.AutoStart = d.Get("auto_start_w_node").(bool)
|
||||
_, err = c.CloudAPI().Compute().PinToStack(ctx, req)
|
||||
if err != nil {
|
||||
warnings.Add(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if affinityLabel, ok := d.GetOk("affinity_label"); ok {
|
||||
req := compute.AffinityLabelSetRequest{
|
||||
ComputeID: computeId,
|
||||
@@ -899,21 +957,36 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
if !oldPin.(bool) {
|
||||
if !d.Get("started").(bool) {
|
||||
return diag.Errorf("Cannot pin to stack a VM, that is not started")
|
||||
}
|
||||
reqToStart := compute.StartRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
}
|
||||
_, err := c.CloudAPI().Compute().Start(ctx, reqToStart)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
req := compute.PinToStackRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
}
|
||||
req.AutoStart = d.Get("auto_start_w_node").(bool)
|
||||
_, err := c.CloudAPI().Compute().PinToStack(ctx, req)
|
||||
_, err = c.CloudAPI().Compute().PinToStack(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Note bene: numa_affinity, cpu_pin, cpu, ram and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
||||
// If STARTED, we need to stop it before update
|
||||
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
|
||||
var isStopRequired bool
|
||||
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu", "cpu", "ram") && d.Get("started").(bool) {
|
||||
if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed", "chipset", "preferred_cpu", "hot_resize") && d.Get("started").(bool) {
|
||||
isStopRequired = true
|
||||
}
|
||||
|
||||
old, new := d.GetChange("cpu")
|
||||
if old.(int) > new.(int) && d.Get("started").(bool) && d.Get("force_resize").(bool) {
|
||||
isStopRequired = true
|
||||
}
|
||||
if isStopRequired {
|
||||
@@ -989,7 +1062,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
"loader_type",
|
||||
"boot_type",
|
||||
"hot_resize",
|
||||
"network_interface_naming") {
|
||||
"network_interface_naming",
|
||||
"os_version") {
|
||||
req := compute.UpdateRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
}
|
||||
@@ -1041,6 +1115,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
req.NetworkInterfaceNaming = d.Get("network_interface_naming").(string)
|
||||
}
|
||||
|
||||
if d.HasChange("os_version") {
|
||||
req.OSVersion = d.Get("os_version").(string)
|
||||
}
|
||||
|
||||
// perform update
|
||||
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
|
||||
return diag.FromErr(err)
|
||||
@@ -1062,18 +1140,26 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("security_groups") {
|
||||
err = utilityComputeSecGroupsConfigure(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("disks") {
|
||||
deletedDisks := make([]interface{}, 0)
|
||||
addedDisks := make([]interface{}, 0)
|
||||
resizedDisks := make([]interface{}, 0)
|
||||
renamedDisks := make([]interface{}, 0)
|
||||
changeStoragePolicyDisks := make([]interface{}, 0)
|
||||
|
||||
oldDisks, newDisks := d.GetChange("disks")
|
||||
oldConv := oldDisks.([]interface{})
|
||||
newConv := newDisks.([]interface{})
|
||||
|
||||
for _, el := range oldConv {
|
||||
if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) {
|
||||
if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) && !isChangeStoragePolicy(newConv, el) {
|
||||
flag := false
|
||||
extraDisks := d.Get("extra_disks").(*schema.Set).List()
|
||||
delDisk := el.(map[string]interface{})
|
||||
@@ -1104,19 +1190,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
if isRenameDisk(oldConv, el) {
|
||||
renamedDisks = append(renamedDisks, el)
|
||||
}
|
||||
if isChangeStoragePolicy(oldConv, el) {
|
||||
changeStoragePolicyDisks = append(changeStoragePolicyDisks, el)
|
||||
}
|
||||
}
|
||||
|
||||
if len(deletedDisks) > 0 {
|
||||
stopReq := compute.StopRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
Force: false,
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().Compute().Stop(ctx, stopReq)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
for _, disk := range deletedDisks {
|
||||
diskConv := disk.(map[string]interface{})
|
||||
if diskConv["disk_type"].(string) == "B" {
|
||||
@@ -1136,14 +1215,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
req := compute.StartRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
AltBootID: 0,
|
||||
}
|
||||
_, err = c.CloudAPI().Compute().Start(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(addedDisks) > 0 {
|
||||
@@ -1153,9 +1224,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
continue
|
||||
}
|
||||
req := compute.DiskAddRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
DiskName: diskConv["disk_name"].(string),
|
||||
Size: uint64(diskConv["size"].(int)),
|
||||
ComputeID: computeRec.ID,
|
||||
DiskName: diskConv["disk_name"].(string),
|
||||
Size: uint64(diskConv["size"].(int)),
|
||||
StoragePolicyID: uint64(diskConv["storage_policy_id"].(int)),
|
||||
}
|
||||
|
||||
if diskConv["sep_id"].(int) != 0 {
|
||||
@@ -1214,6 +1286,22 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(changeStoragePolicyDisks) > 0 {
|
||||
for _, disk := range changeStoragePolicyDisks {
|
||||
diskConv := disk.(map[string]interface{})
|
||||
|
||||
req := disks.ChangeDiskStoragePolicyRequest{
|
||||
DiskID: uint64(diskConv["disk_id"].(int)),
|
||||
StoragePolicyID: uint64(diskConv["storage_policy_id"].(int)),
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().Disks().ChangeDiskStoragePolicy(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("affinity_label") {
|
||||
@@ -1650,9 +1738,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
|
||||
if oldImage.(int) != newImage.(int) {
|
||||
req := compute.RedeployRequest{
|
||||
ComputeID: computeRec.ID,
|
||||
ImageID: uint64(newImage.(int)),
|
||||
DataDisks: "KEEP",
|
||||
ComputeID: computeRec.ID,
|
||||
StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
|
||||
ImageID: uint64(newImage.(int)),
|
||||
DataDisks: "KEEP",
|
||||
}
|
||||
|
||||
if diskSize, ok := d.GetOk("boot_disk_size"); ok {
|
||||
@@ -1665,6 +1754,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
req.ForceStop = forceStop.(bool)
|
||||
}
|
||||
|
||||
if osVersion, ok := d.GetOk("os_version"); ok {
|
||||
req.OSVersion = osVersion.(string)
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().Compute().Redeploy(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
@@ -1758,6 +1851,18 @@ func isRenameDisk(els []interface{}, el interface{}) bool {
return false
}

func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) &&
elOldConv["storage_policy_id"].(int) != elConv["storage_policy_id"].(int) {
return true
}
}
return false
}

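isChangeStoragePolicy matches old and new disk entries by disk_id and reports when only the storage policy differs, which routes the disk into changeStoragePolicyDisks instead of the delete/add paths. Illustrative data only (the IDs below are made up), showing the case it is meant to catch:

// Sketch: example inputs for an isChangeStoragePolicy-style check.
package sketch

// Disk 101 is unchanged; disk 102 keeps its identity but moves from storage
// policy 1 to storage policy 3, so only a change-storage-policy call is needed.
var oldDisks = []interface{}{
	map[string]interface{}{"disk_id": 101, "storage_policy_id": 1},
	map[string]interface{}{"disk_id": 102, "storage_policy_id": 1},
}

var newDisks = []interface{}{
	map[string]interface{}{"disk_id": 101, "storage_policy_id": 1},
	map[string]interface{}{"disk_id": 102, "storage_policy_id": 3},
}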
func isContainsDisk(els []interface{}, el interface{}) bool {
|
||||
for _, elOld := range els {
|
||||
elOldConv := elOld.(map[string]interface{})
|
||||
@@ -1848,6 +1953,11 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
|
||||
Required: true,
|
||||
Description: "Disk size in GiB",
|
||||
},
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "Storage policy id of disk. The rules of the specified storage policy will be used.",
|
||||
},
|
||||
"sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -1908,6 +2018,10 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"to_clean": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return rets
|
||||
}
|
||||
@@ -2002,14 +2116,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
ValidateFunc: validation.IntAtLeast(1),
|
||||
Description: "ID of the resource group where this compute should be deployed.",
|
||||
},
|
||||
"driver": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
// ForceNew: true,
|
||||
StateFunc: statefuncs.StateFuncToUpper,
|
||||
ValidateFunc: validation.StringInSlice([]string{"KVM_X86"}, false), // observe case while validating
|
||||
Description: "Hardware architecture of this compute instance.",
|
||||
},
|
||||
"cpu": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
@@ -2025,6 +2131,11 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
),
|
||||
Description: "Amount of RAM in MB to allocate to this compute instance.",
|
||||
},
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "Storage policy id of compute. The rules of the specified storage policy will be used.",
|
||||
},
|
||||
"boot_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
@@ -2158,6 +2269,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
Schema: disksSubresourceSchemaMake(),
|
||||
},
|
||||
},
|
||||
"boot_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
@@ -2377,6 +2492,46 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
Description: "ID of the connected pci devices",
|
||||
},
|
||||
|
||||
"security_groups": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"net_type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: statefuncs.StateFuncToUpper,
|
||||
ValidateFunc: validation.StringInSlice([]string{"VINS", "EXTNET", "VFNIC", "DPDK", "SDN", "TRUNK"}, false), // observe case while validating
|
||||
Description: "Type of the network",
|
||||
},
|
||||
"net_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the network",
|
||||
},
|
||||
"security_groups": {
|
||||
Type: schema.TypeSet,
|
||||
Required: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"enable_secgroups": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
Description: "list of security group IDs to apply to this interface",
|
||||
},
|
||||
"os_version": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Description: "the OS version installed on the VM",
|
||||
},
|
||||
|
||||
// The rest are Compute properties, which are "computed" once it is created
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
@@ -2451,6 +2606,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"driver": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -2701,7 +2860,7 @@ func ResourceCompute() *schema.Resource {

CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChanges() || diff.HasChanges("chipset", "pin_to_stack", "auto_start_w_node", "network", "affinity_rules", "anti_affinity_rules",
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu") {
"extra_disks", "tags", "port_forwarding", "user_access", "snapshot", "pci_devices", "preferred_cpu", "security_groups") {
diff.SetNewComputed("updated_time")
diff.SetNewComputed("updated_by")
}

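The CustomizeDiff above marks updated_time and updated_by as "known after apply" whenever one of the listed arguments is changing, since the backend will bump them on update. A minimal, self-contained sketch of the mechanism (not the provider's actual resource):

// Sketch: recomputing audit-style attributes from CustomizeDiff.
package sketch

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func resourceExample() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name":         {Type: schema.TypeString, Required: true},
			"updated_time": {Type: schema.TypeInt, Computed: true},
		},
		CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) error {
			// If "name" will change, the stored updated_time is stale, so show it
			// as unknown in the plan instead of the old value.
			if diff.HasChanges("name") {
				return diff.SetNewComputed("updated_time")
			}
			return nil
		},
	}
}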
@@ -34,10 +34,13 @@ package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/go-cty/cty"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
@@ -164,13 +167,13 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
oldList := oldSet.(*schema.Set).List()
|
||||
newList := newSet.(*schema.Set).List()
|
||||
|
||||
detachMap, changeIpMap, changeMacMap, changeMTUMap, attachMap := differenceNetwork(oldList, newList)
|
||||
updateNetwork := differenceNetwork(oldList, newList)
|
||||
|
||||
apiErrCount := 0
|
||||
var lastSavedError error
|
||||
|
||||
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(detachMap), d.Id())
|
||||
for _, netData := range detachMap {
|
||||
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(updateNetwork.DetachMap), d.Id())
|
||||
for _, netData := range updateNetwork.DetachMap {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req := compute.NetDetachRequest{
|
||||
ComputeID: computeId,
|
||||
@@ -187,8 +190,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(changeIpMap), d.Id())
|
||||
for _, netData := range changeIpMap {
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(updateNetwork.ChangeIPMap), d.Id())
|
||||
for _, netData := range updateNetwork.ChangeIPMap {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req := compute.ChangeIPRequest{
|
||||
ComputeID: computeId,
|
||||
@@ -208,7 +211,7 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
|
||||
needStart := false
|
||||
|
||||
if oldSet.(*schema.Set).Len() == len(detachMap) || oldSet.(*schema.Set).Len() == 0 || hasDPDKnetwork(attachMap) || len(changeMacMap) != 0 {
|
||||
if oldSet.(*schema.Set).Len() == len(updateNetwork.DetachMap) || oldSet.(*schema.Set).Len() == 0 || hasDPDKnetwork(updateNetwork.AttachMap) || len(updateNetwork.ChangeMacMap) != 0 {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
if err := utilityComputeStop(ctx, computeId, m); err != nil {
|
||||
apiErrCount++
|
||||
@@ -219,8 +222,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeMac set has %d items for Compute ID %s", len(changeMacMap), d.Id())
|
||||
for _, netData := range changeMacMap {
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeMac set has %d items for Compute ID %s", len(updateNetwork.ChangeMacMap), d.Id())
|
||||
for _, netData := range updateNetwork.ChangeMacMap {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req := compute.ChangeMACRequest{
|
||||
ComputeID: computeId,
|
||||
@@ -237,9 +240,9 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(attachMap, func(i, j int) bool {
|
||||
weightI := attachMap[i]["weight"].(int)
|
||||
weightJ := attachMap[j]["weight"].(int)
|
||||
sort.Slice(updateNetwork.AttachMap, func(i, j int) bool {
|
||||
weightI := updateNetwork.AttachMap[i]["weight"].(int)
|
||||
weightJ := updateNetwork.AttachMap[j]["weight"].(int)
|
||||
if weightI == 0 {
|
||||
return false
|
||||
}
|
||||
@@ -249,8 +252,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
return weightI < weightJ
|
||||
})
|
||||
|
||||
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(attachMap), d.Id())
|
||||
for _, netData := range attachMap {
|
||||
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(updateNetwork.AttachMap), d.Id())
|
||||
for _, netData := range updateNetwork.AttachMap {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req := compute.NetAttachRequest{
|
||||
ComputeID: computeId,
|
||||
@@ -270,7 +273,11 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
req.SDNInterfaceID = netData["sdn_interface_id"].(string)
|
||||
}
|
||||
|
||||
if req.NetType == "DPDK" {
|
||||
if enabledNetwork(d.GetRawConfig().GetAttr("network"), req.NetID, req.NetType) {
|
||||
req.Enabled = netData["enabled"].(bool)
|
||||
}
|
||||
|
||||
if req.NetType == "DPDK" || req.NetType == "EXTNET" {
|
||||
req.MTU = uint64(netData["mtu"].(int))
|
||||
}
|
||||
|
||||
@@ -291,8 +298,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeMTU set has %d items for Compute ID %s", len(changeMTUMap), d.Id())
|
||||
for _, netData := range changeMTUMap {
|
||||
log.Debugf("utilityComputeNetworksConfigure: changeMTU set has %d items for Compute ID %s", len(updateNetwork.ChangeMTUMap), d.Id())
|
||||
for _, netData := range updateNetwork.ChangeMTUMap {
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req := compute.ChangeMTURequest{
|
||||
ComputeID: computeId,
|
||||
@@ -309,6 +316,28 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}

log.Debugf("utilityComputeNetworksConfigure: enableMap set has %d items for Compute ID %s", len(updateNetwork.EnableMap), d.Id())
for _, netData := range updateNetwork.EnableMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.ChangeLinkStateRequest{
ComputeID: computeId,
Interface: netData["mac"].(string),
State: "off",
}

if netData["enabled"].(bool) {
req.State = "on"
}

_, err := c.CloudAPI().Compute().ChangeLinkState(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to change link state network ID %d of type %s from Compute ID %s: %s",
netData["net_id"].(int), netData["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}

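The loop above deliberately keeps going after an individual ChangeLinkState failure, counting errors and remembering the last one so the function can report a single failure at the end. A standalone sketch of that accumulate-and-report pattern (apply stands in for the API call):

// Sketch: apply independent changes, collect failures, report once.
package sketch

import "fmt"

func applyAll(items []string, apply func(string) error) error {
	errCount := 0
	var lastErr error
	for _, item := range items {
		if err := apply(item); err != nil {
			// Keep going so one failing item does not block the rest.
			errCount++
			lastErr = err
		}
	}
	if errCount > 0 {
		return fmt.Errorf("%d change(s) failed, last error: %w", errCount, lastErr)
	}
	return nil
}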
if apiErrCount > 0 {
|
||||
log.Errorf("utilityComputeNetworksConfigure: there were %d error(s) when managing networks of Compute ID %s. Last error was: %s",
|
||||
apiErrCount, d.Id(), lastSavedError)
|
||||
@@ -318,6 +347,75 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
return nil
|
||||
}
|
||||
|
||||
func utilityComputeSecGroupsConfigure(ctx context.Context, d *schema.ResourceData, m interface{}) error {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
simpleCompRec, err := utilityComputeCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiErrCount := 0
|
||||
var lastSavedError error
|
||||
|
||||
oldSecGroups, newSecGroups := d.GetChange("security_groups")
|
||||
updateSecGroups := (newSecGroups.(*schema.Set).Difference(oldSecGroups.(*schema.Set))).List()
|
||||
|
||||
log.Debugf("utilityComputeSecGroupsConfigure: update security groups has %d items for Compute ID %s", len(updateSecGroups), d.Id())
|
||||
if len(updateSecGroups) > 0 {
|
||||
for _, elem := range updateSecGroups {
|
||||
secGroupsMap := elem.(map[string]interface{})
|
||||
|
||||
netType := secGroupsMap["net_type"].(string)
|
||||
netId := uint64(secGroupsMap["net_id"].(int))
|
||||
var mac string
|
||||
for _, iface := range simpleCompRec.Interfaces {
|
||||
if iface.NetID == netId && iface.NetType == netType {
|
||||
mac = iface.MAC
|
||||
break
|
||||
}
|
||||
}
|
||||
if mac == "" {
|
||||
log.Errorf("utilityComputeSecGroupsConfigure: Network with type %s and id %d is not connected to the compute %s",
|
||||
netType, netId, d.Id())
|
||||
apiErrCount++
|
||||
lastSavedError = errors.New(fmt.Sprintf("utilityComputeSecGroupsConfigure: Network with type %s and id %d is not connected to the compute %s",
|
||||
netType, netId, d.Id()))
|
||||
continue
|
||||
}
|
||||
secGroupsIDs := make([]uint64, 0)
|
||||
for _, id := range secGroupsMap["security_groups"].(*schema.Set).List() {
|
||||
secGroupsIDs = append(secGroupsIDs, uint64(id.(int)))
|
||||
}
|
||||
log.Debugf("utilityComputeSecGroupsConfigure: Configure security groups interface parameters on Network with type %s and id %d", netType, netId)
|
||||
req := compute.ChangeSecGroupsRequest{
|
||||
ComputeID: computeId,
|
||||
Interface: mac,
|
||||
SecGroups: secGroupsIDs,
|
||||
}
|
||||
|
||||
if secGroupsMap["enable_secgroups"] != nil {
|
||||
req.EnableSecGroups = secGroupsMap["enable_secgroups"].(bool)
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().Compute().ChangeSecGroups(ctx, req)
|
||||
if err != nil {
|
||||
apiErrCount++
|
||||
lastSavedError = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if apiErrCount > 0 {
|
||||
log.Errorf("utilityComputeSecGroupsConfigure: there were %d error(s) when managing security groups of Compute ID %s. Last error was: %s",
|
||||
apiErrCount, d.Id(), lastSavedError)
|
||||
return lastSavedError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasDPDKnetwork(networkAttachMap []map[string]interface{}) bool {
|
||||
for _, elem := range networkAttachMap {
|
||||
if elem["net_type"].(string) == "DPDK" {
|
||||
@@ -407,12 +505,13 @@ func utilityComputeUpdatePciDevices(ctx context.Context, d *schema.ResourceData,
|
||||
return nil
|
||||
}
|
||||
|
||||
func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap, changeMacMap, changeMTUMap, attachMap []map[string]interface{}) {
|
||||
attachMap = make([]map[string]interface{}, 0)
|
||||
changeIpMap = make([]map[string]interface{}, 0)
|
||||
changeMacMap = make([]map[string]interface{}, 0)
|
||||
changeMTUMap = make([]map[string]interface{}, 0)
|
||||
detachMap = make([]map[string]interface{}, 0)
|
||||
func differenceNetwork(oldList, newList []interface{}) *updatedNetwork {
|
||||
attachMap := make([]map[string]interface{}, 0)
|
||||
changeIpMap := make([]map[string]interface{}, 0)
|
||||
changeMacMap := make([]map[string]interface{}, 0)
|
||||
changeMTUMap := make([]map[string]interface{}, 0)
|
||||
detachMap := make([]map[string]interface{}, 0)
|
||||
enableMap := make([]map[string]interface{}, 0)
|
||||
for _, oldNetwork := range oldList {
|
||||
oldMap := oldNetwork.(map[string]interface{})
|
||||
found := false
|
||||
@@ -430,6 +529,13 @@ func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap,
|
||||
if (newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "DPDK") && (newMap["mtu"] != oldMap["mtu"] && newMap["mtu"].(int) != 0) {
|
||||
changeMTUMap = append(changeMTUMap, newMap)
|
||||
}
|
||||
if newMap["enabled"].(bool) != oldMap["enabled"].(bool) {
|
||||
mac, _ := newMap["mac"].(string)
|
||||
if mac == "" {
|
||||
newMap["mac"] = oldMap["mac"]
|
||||
}
|
||||
enableMap = append(enableMap, newMap)
|
||||
}
|
||||
}
|
||||
if found {
|
||||
break
|
||||
@@ -457,7 +563,16 @@ func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap,
|
||||
attachMap = append(attachMap, newMap)
|
||||
}
|
||||
|
||||
return
|
||||
res := updatedNetwork{
|
||||
DetachMap: detachMap,
|
||||
ChangeIPMap: changeIpMap,
|
||||
ChangeMacMap: changeMacMap,
|
||||
ChangeMTUMap: changeMTUMap,
|
||||
AttachMap: attachMap,
|
||||
EnableMap: enableMap,
|
||||
}
|
||||
|
||||
return &res
|
||||
}
|
||||
|
||||
func compareNetwork(newMap, oldMap map[string]interface{}) bool {
|
||||
@@ -485,3 +600,25 @@ func utilityComputeUpdateZoneID(ctx context.Context, d *schema.ResourceData, m i
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func enabledNetwork(rawNetworkConfig cty.Value, netID uint64, netType string) bool {
for _, netConfigVal := range rawNetworkConfig.AsValueSlice() {
if netConfigVal.IsNull() {
continue
}

netConfig := netConfigVal.AsValueMap()

tempID, _ := netConfig["net_id"].AsBigFloat().Int64()
configNetID := uint64(tempID)

configNetType := netConfig["net_type"].AsString()

if configNetID == netID && configNetType == netType {
enabledVal := netConfig["enabled"]
return !enabledVal.IsNull()
}
}

return false
}

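enabledNetwork walks the raw HCL configuration (the cty.Value from d.GetRawConfig()) because d.Get on a bool cannot tell "enabled omitted" from "enabled = false": in the raw config an omitted attribute is null, an explicit false is not. A small standalone sketch of that null check, assuming the caller already holds the cty object for one network block:

// Sketch: detecting whether an attribute was written in the configuration.
package sketch

import "github.com/hashicorp/go-cty/cty"

// attrIsSet reports whether obj carries a non-null attribute named key.
// An omitted bool is null here, while an explicit "enabled = false" is not,
// which is the distinction d.Get cannot make.
func attrIsSet(obj cty.Value, key string) bool {
	if obj.IsNull() {
		return false
	}
	m := obj.AsValueMap()
	v, ok := m[key]
	return ok && !v.IsNull()
}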
@@ -45,10 +45,37 @@ func utilityComputeAuditsCheckPresence(ctx context.Context, d *schema.ResourceDa
|
||||
req := compute.AuditsRequest{
|
||||
ComputeID: uint64(d.Get("compute_id").(int)),
|
||||
}
|
||||
if timestampAt, ok := d.GetOk("timestamp_at"); ok {
|
||||
req.TimestampAT = uint64(timestampAt.(int))
|
||||
}
|
||||
if timestampTo, ok := d.GetOk("timestamp_to"); ok {
|
||||
req.TimestampTO = uint64(timestampTo.(int))
|
||||
}
|
||||
if user, ok := d.GetOk("user"); ok {
|
||||
req.User = user.(string)
|
||||
}
|
||||
if call, ok := d.GetOk("call"); ok {
|
||||
req.Call = call.(string)
|
||||
}
|
||||
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||
req.SortBy = sortBy.(string)
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
req.Page = uint64(page.(int))
|
||||
}
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if minStatusCode, ok := d.GetOk("min_status_code"); ok {
|
||||
req.MinStatusCode = uint64(minStatusCode.(int))
|
||||
}
|
||||
if maxStatusCode, ok := d.GetOk("max_status_code"); ok {
|
||||
req.MaxStatusCode = uint64(maxStatusCode.(int))
|
||||
}
|
||||
|
||||
computeAudits, err := c.CloudAPI().Compute().Audits(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return compute.ListAudits{}, err
|
||||
}
|
||||
return computeAudits, nil
|
||||
return *computeAudits, nil
|
||||
}
|
||||
|
||||
@@ -86,6 +86,9 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if zoneID, ok := d.GetOk("zone_id"); ok {
|
||||
req.ZoneID = uint64(zoneID.(int))
|
||||
}
|
||||
|
||||
listComputes, err := c.CloudAPI().Compute().List(ctx, req)
|
||||
if err != nil {
|
||||
|
||||
@@ -175,6 +175,11 @@ func dsLBListSchemaMake() map[string]*schema.Schema {
|
||||
Optional: true,
|
||||
Default: 0,
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Zone ID",
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
|
||||
@@ -92,6 +92,9 @@ func utilityLBListCheckPresence(ctx context.Context, d *schema.ResourceData, m i
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if zoneID, ok := d.GetOk("zone_id"); ok {
|
||||
req.ZoneID = uint64(zoneID.(int))
|
||||
}
|
||||
|
||||
log.Debugf("utilityLBListCheckPresence: load lb list")
|
||||
lbList, err := c.CloudAPI().LB().List(ctx, req)
|
||||
|
||||
@@ -269,6 +269,23 @@ func resourceLimitsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"storage_policy": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Required: true,
},
"limit": {
Type: schema.TypeInt,
Optional: true,
},
},
},
},
}

return res
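The storage_policy block above is a set of {id, limit} objects. A hedged sketch of expanding that set back into typed values on the way to an API request; StoragePolicyLimit is a stand-in struct, not the real SDK type:

// Sketch: expanding the "storage_policy" set into typed values.
package sketch

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// StoragePolicyLimit is an assumed shape for illustration.
type StoragePolicyLimit struct {
	ID    uint64
	Limit int64
}

func expandStoragePolicies(set *schema.Set) []StoragePolicyLimit {
	res := make([]StoragePolicyLimit, 0, set.Len())
	for _, raw := range set.List() {
		m := raw.(map[string]interface{})
		res = append(res, StoragePolicyLimit{
			ID:    uint64(m["id"].(int)),
			Limit: int64(m["limit"].(int)),
		})
	}
	return res
}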
@@ -418,6 +435,13 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"storage_policy_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
@@ -265,6 +265,13 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"storage_policy_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -250,6 +250,13 @@ func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"storage_policy_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -94,6 +94,7 @@ func flattenResgroup(d *schema.ResourceData, details rg.RecordResourceGroup) err
|
||||
d.Set("account_name", details.AccountName)
|
||||
d.Set("acl", flattenRgAcl(details.ACL))
|
||||
d.Set("compute_features", details.ComputeFeatures)
|
||||
d.Set("storage_policy", flattenRgStoragePolicy(details.ResourceLimits.StoragePolicies))
|
||||
d.Set("vms", details.Computes)
|
||||
d.Set("created_by", details.CreatedBy)
|
||||
d.Set("created_time", details.CreatedTime)
|
||||
@@ -116,6 +117,8 @@ func flattenResgroup(d *schema.ResourceData, details rg.RecordResourceGroup) err
|
||||
d.Set("cpu_allocation_parameter", details.CPUAllocationParameter)
|
||||
d.Set("cpu_allocation_ratio", details.CPUAllocationRatio)
|
||||
d.Set("sdn_access_group_id", details.SDNAccessGroupID)
|
||||
d.Set("resource_limits", flattenRgResourceLimits(details.ResourceLimits))
|
||||
d.Set("storage_policy_ids", details.StoragePolicyIDs)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -204,6 +207,7 @@ func flattenRg(d *schema.ResourceData, itemRg rg.RecordResourceGroup) {
|
||||
d.Set("cpu_allocation_parameter", itemRg.CPUAllocationParameter)
|
||||
d.Set("cpu_allocation_ratio", itemRg.CPUAllocationRatio)
|
||||
d.Set("sdn_access_group_id", itemRg.SDNAccessGroupID)
|
||||
d.Set("storage_policy_ids", itemRg.StoragePolicyIDs)
|
||||
}
|
||||
|
||||
func flattenRgAudits(rgAudits rg.ListAudits) []map[string]interface{} {
|
||||
@@ -257,6 +261,7 @@ func flattenRgList(rgl *rg.ListResourceGroups) []map[string]interface{} {
|
||||
"cpu_allocation_parameter": rg.CPUAllocationParameter,
|
||||
"cpu_allocation_ratio": rg.CPUAllocationRatio,
|
||||
"sdn_access_group_id": rg.SDNAccessGroupID,
|
||||
"storage_policy_ids": rg.StoragePolicyIDs,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
@@ -282,19 +287,34 @@ func flattenRgAcl(rgAcls rg.ListACL) []map[string]interface{} {
|
||||
func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"cu_c": rl.CUC,
|
||||
"cu_d": rl.CUD,
|
||||
"cu_i": rl.CUI,
|
||||
"cu_m": rl.CUM,
|
||||
"cu_dm": rl.CUDM,
|
||||
"cu_np": rl.CUNP,
|
||||
"gpu_units": rl.GPUUnits,
|
||||
"cu_c": rl.CUC,
|
||||
"cu_d": rl.CUD,
|
||||
"cu_i": rl.CUI,
|
||||
"cu_m": rl.CUM,
|
||||
"cu_dm": rl.CUDM,
|
||||
"cu_np": rl.CUNP,
|
||||
"gpu_units": rl.GPUUnits,
|
||||
"storage_policy": flattenRgStoragePolicy(rl.StoragePolicies),
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgStoragePolicy(spList []rg.StoragePolicy) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(spList))
|
||||
for _, sp := range spList {
|
||||
temp := map[string]interface{}{
|
||||
"id": sp.ID,
|
||||
"limit": sp.Limit,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRules(list rg.ListRules) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(list))
|
||||
for _, rule := range list {
|
||||
|
||||
@@ -164,6 +164,27 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
|
||||
req.SDNAccessGroupID = sdnAccessGroupID.(string)
|
||||
}
|
||||
|
||||
if storagePolicies, ok := d.GetOk("storage_policy"); ok {
|
||||
var id uint64
|
||||
var limit int
|
||||
|
||||
if storagePolicies.(*schema.Set).Len() > 0 {
|
||||
spList := storagePolicies.(*schema.Set).List()
|
||||
for _, spInterface := range spList {
|
||||
sps := spInterface.(map[string]interface{})
|
||||
id = uint64(sps["id"].(int))
|
||||
limit = sps["limit"].(int)
|
||||
|
||||
spModel := rg.StoragePolicy{
|
||||
ID: id,
|
||||
Limit: limit,
|
||||
}
|
||||
|
||||
req.StoragePolicies = append(req.StoragePolicies, spModel)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
apiResp, err := c.CloudAPI().RG().Create(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
@@ -404,7 +425,16 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
|
||||
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id()))
|
||||
}
|
||||
|
||||
if d.HasChanges("name", "quota", "description", "uniq_pools") {
|
||||
needUpdateSP := false
|
||||
if d.HasChange("storage_policy") {
|
||||
needUpdate, err := utilityRGUpdateStPolicy(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
needUpdateSP = needUpdate
|
||||
}
|
||||
|
||||
if d.HasChanges("name", "quota", "description", "uniq_pools") || needUpdateSP {
|
||||
if err := utilityUpdateRG(ctx, d, m, rgData.ID); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
@@ -655,7 +685,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"def_net": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
@@ -677,7 +706,24 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"storage_policy": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"limit": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: -1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
@@ -835,6 +881,65 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"storage_policy_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"resource_limits": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cu_c": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_dm": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"storage_policy": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"limit": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -863,6 +968,10 @@ func ResourceResgroup() *schema.Resource {
|
||||
if diff.HasChange("def_net") {
|
||||
diff.SetNewComputed("def_net_id")
|
||||
}
|
||||
if diff.HasChange("storage_policy") {
|
||||
diff.SetNewComputed("storage_policy_ids")
|
||||
diff.SetNewComputed("resource_limits")
|
||||
}
|
||||
if diff.HasChanges() {
|
||||
diff.SetNewComputed("updated_by")
|
||||
diff.SetNewComputed("updated_time")
|
||||
|
||||
@@ -124,6 +124,31 @@ func utilityUpdateRG(ctx context.Context, d *schema.ResourceData, m interface{},
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("storage_policy") {
|
||||
log.Debugf("resourceResgroupUpdate: storage policies specified.")
|
||||
|
||||
if storagePolicies, ok := d.GetOk("storage_policy"); ok {
|
||||
var id uint64
|
||||
var limit int
|
||||
|
||||
if storagePolicies.(*schema.Set).Len() > 0 {
|
||||
spList := storagePolicies.(*schema.Set).List()
|
||||
for _, spInterface := range spList {
|
||||
sps := spInterface.(map[string]interface{})
|
||||
id = uint64(sps["id"].(int))
|
||||
limit = sps["limit"].(int)
|
||||
|
||||
spModel := rg.StoragePolicy{
|
||||
ID: id,
|
||||
Limit: limit,
|
||||
}
|
||||
|
||||
req.StoragePolicies = append(req.StoragePolicies, spModel)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.")
|
||||
req.Description = d.Get("description").(string)
|
||||
@@ -139,9 +164,112 @@ func utilityUpdateRG(ctx context.Context, d *schema.ResourceData, m interface{},
|
||||
}
|
||||
}
|
||||
|
||||
_, _, updateStPolicy := utilityGetStoragePolicyUpdate(ctx, d, m)
|
||||
if len(updateStPolicy) != 0 {
|
||||
storagePolicies := make([]rg.StoragePolicy, 0, len(updateStPolicy))
|
||||
for _, stPolicyVal := range updateStPolicy {
|
||||
reqStPolicy := rg.StoragePolicy{
|
||||
ID: uint64(stPolicyVal["id"].(int)),
|
||||
Limit: stPolicyVal["limit"].(int),
|
||||
}
|
||||
|
||||
storagePolicies = append(storagePolicies, reqStPolicy)
|
||||
}
|
||||
req.StoragePolicies = storagePolicies
|
||||
}
|
||||
|
||||
_, err := c.CloudAPI().RG().Update(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func utilityRGUpdateStPolicy(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
rgID, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
|
||||
addSP, delSP, updateSP := utilityGetStoragePolicyUpdate(ctx, d, m)
|
||||
needUpdate := len(updateSP) > 0
|
||||
|
||||
if len(addSP) > 0 {
|
||||
for _, spMap := range addSP {
|
||||
req := rg.AddStoragePolicyRequest{
|
||||
RGID: rgID,
|
||||
StoragePolicyID: uint64(spMap["id"].(int)),
|
||||
Limit: spMap["limit"].(int),
|
||||
}
|
||||
log.Debugf("utilityRGUpdateStPolicy: starting to add storage policy ID:%d for resource group %d", req.StoragePolicyID, req.RGID)
|
||||
_, err := c.CloudAPI().RG().AddStoragePolicy(ctx, req)
|
||||
if err != nil {
|
||||
return needUpdate, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(delSP) > 0 {
|
||||
for _, spMap := range delSP {
|
||||
req := rg.DelStoragePolicyRequest{
|
||||
RGID: rgID,
|
||||
StoragePolicyID: uint64(spMap["id"].(int)),
|
||||
}
|
||||
log.Debugf("utilityRGUpdateStPolicy: starting to delete storage policy ID:%d from resource group %d", req.StoragePolicyID, req.RGID)
|
||||
_, err := c.CloudAPI().RG().DelStoragePolicy(ctx, req)
|
||||
if err != nil {
|
||||
return needUpdate, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return needUpdate, nil
|
||||
}
|
||||
|
||||
func utilityGetStoragePolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) (added, deleted, updated []map[string]interface{}) {
	added = make([]map[string]interface{}, 0)
	deleted = make([]map[string]interface{}, 0)
	updated = make([]map[string]interface{}, 0)
	oldSet, newSet := d.GetChange("storage_policy")
	oldList := oldSet.(*schema.Set).List()
	newList := newSet.(*schema.Set).List()

	for _, oldSP := range oldList {
		oldMap := oldSP.(map[string]interface{})
		found := false
		for _, newSP := range newList {
			newMap := newSP.(map[string]interface{})
			if oldMap["id"] == newMap["id"] {
				found = true
				if oldMap["limit"] != newMap["limit"] {
					updated = append(updated, newMap)
				}
				break
			}
			if found {
				break
			}
		}
		if found {
			continue
		}
		deleted = append(deleted, oldMap)
	}

	for _, newSP := range newList {
		newMap := newSP.(map[string]interface{})
		found := false
		for _, oldSP := range oldList {
			oldMap := oldSP.(map[string]interface{})
			if oldMap["id"] == newMap["id"] {
				found = true
				break
			}
		}
		if found {
			continue
		}
		added = append(added, newMap)
	}

	return
}
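The classification above can be exercised on its own. Below is a minimal, self-contained sketch (not part of this commit) that reproduces the added/deleted/updated split on plain slices of the maps that schema.Set.List() returns; the diffStoragePolicies helper and the sample IDs and limits are illustrative only.

package main

import "fmt"

// diffStoragePolicies mirrors the classification performed by
// utilityGetStoragePolicyUpdate: entries only in newList are "added",
// entries only in oldList are "deleted", and entries present in both
// but with a different limit are "updated".
func diffStoragePolicies(oldList, newList []map[string]interface{}) (added, deleted, updated []map[string]interface{}) {
	for _, oldSP := range oldList {
		found := false
		for _, newSP := range newList {
			if oldSP["id"] == newSP["id"] {
				found = true
				if oldSP["limit"] != newSP["limit"] {
					updated = append(updated, newSP)
				}
				break
			}
		}
		if !found {
			deleted = append(deleted, oldSP)
		}
	}
	for _, newSP := range newList {
		found := false
		for _, oldSP := range oldList {
			if oldSP["id"] == newSP["id"] {
				found = true
				break
			}
		}
		if !found {
			added = append(added, newSP)
		}
	}
	return
}

func main() {
	oldList := []map[string]interface{}{
		{"id": 1, "limit": 100},
		{"id": 2, "limit": 50},
	}
	newList := []map[string]interface{}{
		{"id": 2, "limit": 75}, // limit changed -> updated
		{"id": 3, "limit": -1}, // new policy   -> added
	}
	added, deleted, updated := diffStoragePolicies(oldList, newList)
	fmt.Println(added)   // [map[id:3 limit:-1]]
	fmt.Println(deleted) // [map[id:1 limit:100]]
	fmt.Println(updated) // [map[id:2 limit:75]]
}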
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceSecurityGroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
securityGroup, err := utilitySecurityGroupCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
flattenSecurityGroup(d, securityGroup)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataSourceSecurityGroup() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceSecurityGroupRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceSecurityGroupSchemaMake(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceSecurityGroupListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	securityGroupList, err := utilitySecurityGroupListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenSecurityGroupList(securityGroupList))
	d.Set("entry_count", securityGroupList.EntryCount)

	return nil
}
|
||||
|
||||
func DataSourceSecurityGroupList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceSecurityGroupListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceSecurityGroupListSchemaMake(),
|
||||
}
|
||||
}
|
||||
68
internal/service/cloudapi/secgroup/flattens.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/secgroup"
|
||||
)
|
||||
|
||||
func flattenSecurityGroupResource(d *schema.ResourceData, securityGroup *secgroup.RecordSecurityGroup) {
|
||||
d.Set("security_group_id", securityGroup.ID)
|
||||
d.Set("account_id", securityGroup.AccountID)
|
||||
d.Set("name", securityGroup.Name)
|
||||
d.Set("description", securityGroup.Description)
|
||||
d.Set("rules", flattenRules(securityGroup.Rules))
|
||||
d.Set("created_at", securityGroup.CreatedAt)
|
||||
d.Set("created_by", securityGroup.CreatedBy)
|
||||
d.Set("updated_at", securityGroup.UpdatedAt)
|
||||
d.Set("updated_by", securityGroup.UpdatedBy)
|
||||
}
|
||||
|
||||
func flattenSecurityGroupList(securityGroupList *secgroup.ListSecurityGroups) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(securityGroupList.Data))
|
||||
for _, v := range securityGroupList.Data {
|
||||
temp := map[string]interface{}{
|
||||
"account_id": v.AccountID,
|
||||
"name": v.Name,
|
||||
"description": v.Description,
|
||||
"rules": flattenRules(v.Rules),
|
||||
"created_at": v.CreatedAt,
|
||||
"created_by": v.CreatedBy,
|
||||
"security_group_id": v.ID,
|
||||
"updated_at": v.UpdatedAt,
|
||||
"updated_by": v.UpdatedBy,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenSecurityGroup(d *schema.ResourceData, securityGroup *secgroup.RecordSecurityGroup) {
|
||||
d.Set("security_group_id", securityGroup.ID)
|
||||
d.Set("account_id", securityGroup.AccountID)
|
||||
d.Set("name", securityGroup.Name)
|
||||
d.Set("description", securityGroup.Description)
|
||||
d.Set("rules", flattenRules(securityGroup.Rules))
|
||||
d.Set("created_at", securityGroup.CreatedAt)
|
||||
d.Set("created_by", securityGroup.CreatedBy)
|
||||
d.Set("updated_at", securityGroup.UpdatedAt)
|
||||
d.Set("updated_by", securityGroup.UpdatedBy)
|
||||
}
|
||||
|
||||
func flattenRules(rules secgroup.Rules) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
temp := map[string]interface{}{
|
||||
"id": rule.ID,
|
||||
"direction": rule.Direction,
|
||||
"ethertype": rule.Ethertype,
|
||||
"protocol": rule.Protocol,
|
||||
"port_range_min": rule.PortRangeMin,
|
||||
"port_range_max": rule.PortRangeMax,
|
||||
"remote_ip_prefix": rule.RemoteIPPrefix,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
146
internal/service/cloudapi/secgroup/resource_security_group.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/secgroup"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
|
||||
)
|
||||
|
||||
func resourceSecurityGroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
log.Debugf("resourceSecurityGroupCreate: called with account ID %d, name %s", uint64(d.Get("account_id").(int)), d.Get("name").(string))
|
||||
|
||||
c := m.(*controller.ControllerCfg)
|
||||
warnings := dc.Warnings{}
|
||||
|
||||
accountID := uint64(d.Get("account_id").(int))
|
||||
name := d.Get("name").(string)
|
||||
|
||||
req := secgroup.CreateRequest{
|
||||
AccountID: accountID,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
if description, ok := d.GetOk("description"); ok {
|
||||
req.Description = description.(string)
|
||||
}
|
||||
|
||||
securityGroupID, err := c.CloudAPI().SecurityGroup().Create(ctx, req)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.FormatUint(securityGroupID, 10))
|
||||
d.Set("security_group_id", securityGroupID)
|
||||
|
||||
return append(warnings.Get(), resourceSecurityGroupRead(ctx, d, m)...)
|
||||
}
|
||||
|
||||
func resourceSecurityGroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
log.Debugf("resourceSecurityGroupRead: called with with account ID %d", uint64(d.Get("account_id").(int)))
|
||||
|
||||
w := dc.Warnings{}
|
||||
|
||||
securityGroupItem, err := utilitySecurityGroupCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
flattenSecurityGroupResource(d, securityGroupItem)
|
||||
|
||||
return w.Get()
|
||||
}
|
||||
|
||||
func resourceSecurityGroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
securityGroupID, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
|
||||
log.Debugf("resourceSecurityGroupUpdate: called security group with id %d", securityGroupID)
|
||||
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
_, err := utilitySecurityGroupCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
_, err = strconv.ParseInt(d.Id(), 10, 64)
|
||||
if err != nil {
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
if d.HasChanges("name", "description") {
|
||||
|
||||
if err := utilitySecurityGroupHandleHasChanges(ctx, d, c, securityGroupID); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("rules") {
|
||||
if err := utilitySecurityGroupUpdateRules(ctx, d, c, securityGroupID); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
return resourceSecurityGroupRead(ctx, d, m)
|
||||
}
|
||||
|
||||
func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
log.Debugf("resourceSecurityGroupDelete: called with id %s", d.Id())
|
||||
|
||||
securityGroupItem, err := utilitySecurityGroupCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
|
||||
d.SetId("")
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := secgroup.DeleteRequest{
|
||||
SecurityGroupID: securityGroupItem.ID,
|
||||
}
|
||||
if _, err := c.CloudAPI().SecurityGroup().Delete(ctx, req); err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ResourceSecurityGroup() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
CreateContext: resourceSecurityGroupCreate,
|
||||
ReadContext: resourceSecurityGroupRead,
|
||||
UpdateContext: resourceSecurityGroupUpdate,
|
||||
DeleteContext: resourceSecurityGroupDelete,
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
StateContext: schema.ImportStatePassthroughContext,
|
||||
},
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Create: &constants.Timeout600s,
|
||||
Read: &constants.Timeout600s,
|
||||
Update: &constants.Timeout600s,
|
||||
Delete: &constants.Timeout600s,
|
||||
Default: &constants.Timeout600s,
|
||||
},
|
||||
|
||||
Schema: resourceSecurityGroupSchemaMake(),
|
||||
}
|
||||
}
|
||||
288
internal/service/cloudapi/secgroup/schema.go
Normal file
@@ -0,0 +1,288 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||
)
|
||||
|
||||
func resourceSecurityGroupSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"rules": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"direction": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: validation.StringInSlice([]string{"inbound", "outbound"}, true),
|
||||
},
|
||||
"ethertype": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "IPv4",
|
||||
ValidateFunc: validation.StringInSlice([]string{"IPv4", "IPv6"}, true),
|
||||
},
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ValidateFunc: validation.StringInSlice([]string{"icmp", "tcp", "udp"}, true),
|
||||
},
|
||||
"port_range_min": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"port_range_max": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"remote_ip_prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"security_group_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceSecurityGroupSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"security_group_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"direction": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"ethertype": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port_range_min": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"port_range_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"remote_ip_prefix": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"created_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceSecurityGroupListSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"by_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"sort_by": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"created_min": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"created_max": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"updated_min": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"updated_max": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"security_group_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"direction": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"ethertype": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port_range_min": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"port_range_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"remote_ip_prefix": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"created_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_at": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
95
internal/service/cloudapi/secgroup/utility_security_group.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/secgroup"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilitySecurityGroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*secgroup.RecordSecurityGroup, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := secgroup.GetRequest{}
|
||||
if d.Id() != "" {
|
||||
securityGroupID, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req.SecurityGroupID = securityGroupID
|
||||
} else {
|
||||
req.SecurityGroupID = uint64(d.Get("security_group_id").(int))
|
||||
}
|
||||
|
||||
log.Debugf("utilitySecurityGroupCheckPresence: load security group")
|
||||
securityGroup, err := c.CloudAPI().SecurityGroup().Get(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return securityGroup, nil
|
||||
}
|
||||
|
||||
func utilitySecurityGroupHandleHasChanges(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, securityGroupID uint64) error {
|
||||
req := secgroup.UpdateRequest{
|
||||
SecurityGroupID: securityGroupID,
|
||||
}
|
||||
|
||||
if d.HasChange("name") {
|
||||
name := d.Get("name").(string)
|
||||
req.Name = name
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
description := d.Get("description").(string)
|
||||
req.Description = description
|
||||
}
|
||||
|
||||
if _, err := c.CloudAPI().SecurityGroup().Update(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func utilitySecurityGroupUpdateRules(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, securityGroupID uint64) error {
|
||||
oldSet, newSet := d.GetChange("rules")
|
||||
deletedRules := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
|
||||
for _, deletedInterface := range deletedRules {
|
||||
deletedItem := deletedInterface.(map[string]interface{})
|
||||
ruleID := uint64(deletedItem["id"].(int))
|
||||
req := secgroup.DeleteRuleRequest{
|
||||
SecurityGroupID: securityGroupID,
|
||||
RuleID: ruleID,
|
||||
}
|
||||
if _, err := c.CloudAPI().SecurityGroup().DeleteRule(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
addedRules := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
|
||||
for _, addedInterface := range addedRules {
|
||||
addedItem := addedInterface.(map[string]interface{})
|
||||
direction := addedItem["direction"].(string)
|
||||
ethertype := addedItem["ethertype"].(string)
|
||||
protocol := addedItem["protocol"].(string)
|
||||
portRangeMin := uint64(addedItem["port_range_min"].(int))
|
||||
portRangeMax := uint64(addedItem["port_range_max"].(int))
|
||||
remoteIPPrefix := addedItem["remote_ip_prefix"].(string)
|
||||
req := secgroup.CreateRuleRequest{
|
||||
SecurityGroupID: securityGroupID,
|
||||
Direction: direction,
|
||||
Ethertype: ethertype,
|
||||
Protocol: protocol,
|
||||
PortRangeMin: portRangeMin,
|
||||
PortRangeMax: portRangeMax,
|
||||
RemoteIPPrefix: remoteIPPrefix,
|
||||
}
|
||||
|
||||
if _, err := c.CloudAPI().SecurityGroup().CreateRule(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
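utilitySecurityGroupUpdateRules relies on schema.Set.Difference to decide which rule blocks to delete and which to create. The following standalone sketch (not part of this commit) shows that set arithmetic in isolation, assuming a trimmed-down two-field rule schema purely for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
	// Hash each rule block by its content, as a TypeSet of rule sub-resources does.
	hash := schema.HashResource(&schema.Resource{Schema: map[string]*schema.Schema{
		"direction": {Type: schema.TypeString},
		"protocol":  {Type: schema.TypeString},
	}})

	oldRules := schema.NewSet(hash, []interface{}{
		map[string]interface{}{"direction": "inbound", "protocol": "tcp"},
		map[string]interface{}{"direction": "inbound", "protocol": "udp"},
	})
	newRules := schema.NewSet(hash, []interface{}{
		map[string]interface{}{"direction": "inbound", "protocol": "tcp"},
		map[string]interface{}{"direction": "outbound", "protocol": "tcp"},
	})

	// Rules present only in the old set are deleted via DeleteRule;
	// rules present only in the new set are created via CreateRule.
	deleted := oldRules.Difference(newRules).List()
	added := newRules.Difference(oldRules).List()

	fmt.Println(len(deleted), len(added)) // 1 1
}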
|
||||
@@ -0,0 +1,60 @@
|
||||
package secgroup
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/secgroup"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilitySecurityGroupListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*secgroup.ListSecurityGroups, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
req := secgroup.ListRequest{}
|
||||
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
req.Page = uint64(page.(int))
|
||||
}
|
||||
if byID, ok := d.GetOk("by_id"); ok {
|
||||
req.ByID = uint64(byID.(int))
|
||||
}
|
||||
if accountID, ok := d.GetOk("account_id"); ok {
|
||||
req.AccountID = uint64(accountID.(int))
|
||||
}
|
||||
if name, ok := d.GetOk("name"); ok {
|
||||
req.Name = name.(string)
|
||||
}
|
||||
if desc, ok := d.GetOk("desc"); ok {
|
||||
req.Description = desc.(string)
|
||||
}
|
||||
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||
req.SortBy = sortBy.(string)
|
||||
}
|
||||
if createdMin, ok := d.GetOk("created_min"); ok {
|
||||
req.CreatedMin = uint64(createdMin.(int))
|
||||
}
|
||||
if createdMax, ok := d.GetOk("created_max"); ok {
|
||||
req.CreatedMax = uint64(createdMax.(int))
|
||||
}
|
||||
if updatedMin, ok := d.GetOk("updated_min"); ok {
|
||||
req.UpdatedMin = uint64(updatedMin.(int))
|
||||
}
|
||||
if updatedMax, ok := d.GetOk("updated_max"); ok {
|
||||
req.UpdatedMax = uint64(updatedMax.(int))
|
||||
}
|
||||
|
||||
log.Debugf("utilitySecurityGroupListCheckPresence: load storage policy list")
|
||||
|
||||
securityGroupList, err := c.CloudAPI().SecurityGroup().List(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return securityGroupList, nil
|
||||
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
package stpolicy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceStoragePolicyRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
storagePolicy, err := utilityStoragePolicyCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
flattenStoragePolicyData(d, storagePolicy)
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataSourceStoragePolicy() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceStoragePolicyRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceStoragePolicySchemaMake(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package stpolicy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceStoragePolicyListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
storagePolicyList, err := utilityStoragePolicyListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenStoragePolicyList(storagePolicyList))
|
||||
d.Set("entry_count", storagePolicyList.EntryCount)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataSourceStoragePolicyList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceStoragePolicyListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceStoragePolicyListSchemaMake(),
|
||||
}
|
||||
}
|
||||
63
internal/service/cloudapi/stpolicy/flattens.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package stpolicy
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stpolicy"
|
||||
)
|
||||
|
||||
func flattenStoragePolicyData(d *schema.ResourceData, storagePolicy *stpolicy.InfoStoragePolicy) {
|
||||
d.Set("storage_policy_id", storagePolicy.ID)
|
||||
d.Set("description", storagePolicy.Description)
|
||||
d.Set("guid", storagePolicy.GUID)
|
||||
d.Set("limit_iops", storagePolicy.LimitIOPS)
|
||||
d.Set("name", storagePolicy.Name)
|
||||
d.Set("status", storagePolicy.Status)
|
||||
d.Set("access_seps_pools", flattenAccessSEPPools(storagePolicy.AccessSEPPools))
|
||||
d.Set("usage", flattenUsage(storagePolicy.Usage))
|
||||
}
|
||||
|
||||
func flattenAccessSEPPools(accessSEPPools stpolicy.ListAccessSEPPools) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(accessSEPPools))
|
||||
for _, asp := range accessSEPPools {
|
||||
temp := map[string]interface{}{
|
||||
"sep_id": asp.SEPID,
|
||||
//TODO
|
||||
//"name": asp.Name,
|
||||
"pool_names": asp.PoolNames,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenUsage(usage stpolicy.Usage) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
|
||||
temp := map[string]interface{}{
|
||||
"accounts": usage.Accounts,
|
||||
"resgroups": usage.Resgroups,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenStoragePolicyList(storagePolicyList *stpolicy.ListStoragePolicies) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(storagePolicyList.Data))
|
||||
for _, v := range storagePolicyList.Data {
|
||||
temp := map[string]interface{}{
|
||||
"storage_policy_id": v.ID,
|
||||
"description": v.Description,
|
||||
"guid": v.GUID,
|
||||
"limit_iops": v.LimitIOPS,
|
||||
"name": v.Name,
|
||||
"status": v.Status,
|
||||
"access_seps_pools": flattenAccessSEPPools(v.AccessSEPPools),
|
||||
"usage": flattenUsage(v.Usage),
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
207
internal/service/cloudapi/stpolicy/schema.go
Normal file
@@ -0,0 +1,207 @@
|
||||
package stpolicy
|
||||
|
||||
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
|
||||
func dataSourceStoragePolicySchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"limit_iops": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"access_seps_pools": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"pool_names": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"usage": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"accounts": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"resgroups": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceStoragePolicyListSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"by_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"limit_iops": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"sort_by": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"resgroup_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"pool_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"storage_policy_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"limit_iops": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"access_seps_pools": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"pool_names": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"usage": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"accounts": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"resgroups": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
29
internal/service/cloudapi/stpolicy/utility_storage_policy.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package stpolicy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stpolicy"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityStoragePolicyCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stpolicy.InfoStoragePolicy, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := stpolicy.GetRequest{}
|
||||
|
||||
if d.Id() != "" {
|
||||
storagePolicyID, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req.StoragePolicyID = storagePolicyID
|
||||
} else {
|
||||
req.StoragePolicyID = uint64(d.Get("storage_policy_id").(int))
|
||||
}
|
||||
|
||||
storagePolicyData, err := c.CloudAPI().StPolicy().Get(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return storagePolicyData, nil
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
package stpolicy
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/stpolicy"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityStoragePolicyListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*stpolicy.ListStoragePolicies, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := stpolicy.ListRequest{}
|
||||
|
||||
if accountID, ok := d.GetOk("account_id"); ok {
|
||||
req.AccountID = uint64(accountID.(int))
|
||||
}
|
||||
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
req.Page = uint64(page.(int))
|
||||
}
|
||||
|
||||
if byID, ok := d.GetOk("by_id"); ok {
|
||||
req.ByID = uint64(byID.(int))
|
||||
}
|
||||
|
||||
if name, ok := d.GetOk("name"); ok {
|
||||
req.Name = name.(string)
|
||||
}
|
||||
|
||||
if status, ok := d.GetOk("status"); ok {
|
||||
req.Status = status.(string)
|
||||
}
|
||||
|
||||
if desc, ok := d.GetOk("desc"); ok {
|
||||
req.Desc = desc.(string)
|
||||
}
|
||||
|
||||
if limitIOPS, ok := d.GetOk("limit_iops"); ok {
|
||||
req.LimitIOPS = uint64(limitIOPS.(int))
|
||||
}
|
||||
|
||||
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||
req.SortBy = sortBy.(string)
|
||||
}
|
||||
|
||||
if resgroupID, ok := d.GetOk("resgroup_id"); ok {
|
||||
req.ResgroupID = uint64(resgroupID.(int))
|
||||
}
|
||||
|
||||
if SEPID, ok := d.GetOk("sep_id"); ok {
|
||||
req.SepID = uint64(SEPID.(int))
|
||||
}
|
||||
|
||||
if poolName, ok := d.GetOk("pool_name"); ok {
|
||||
req.PoolName = poolName.(string)
|
||||
}
|
||||
|
||||
log.Debugf("utilityStoragePolicyListCheckPresence: load storage policy list")
|
||||
|
||||
storagePolicyList, err := c.CloudAPI().StPolicy().List(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return storagePolicyList, nil
|
||||
}
|
||||
@@ -28,6 +28,9 @@ func utilityTrunkListCheckPresence(ctx context.Context, d *schema.ResourceData,
|
||||
if trunkTags, ok := d.GetOk("trunk_tags"); ok {
|
||||
req.TrunkTags = trunkTags.(string)
|
||||
}
|
||||
if status, ok := d.GetOk("status"); ok {
|
||||
req.Status = status.(string)
|
||||
}
|
||||
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||
req.SortBy = sortBy.(string)
|
||||
}
|
||||
|
||||
@@ -134,6 +134,10 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"enable_secgroups": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"flipgroup_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -234,6 +238,13 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"security_groups": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"target": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -327,6 +338,10 @@ func vnfDevSchemaMake() map[string]*schema.Schema {
|
||||
Schema: vnfInterfaceSchemaMake(),
|
||||
},
|
||||
},
|
||||
"live_migration_job_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"lock_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -362,6 +377,10 @@ func vnfDevSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -554,6 +573,10 @@ func dhcpSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -678,6 +701,13 @@ func gwSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"routes": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: routesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -690,6 +720,10 @@ func gwSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -810,6 +844,13 @@ func natSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"routes": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: routesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@@ -822,6 +863,10 @@ func natSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -114,6 +114,11 @@ func dataSourceVinsListSchemaMake() map[string]*schema.Schema {
|
||||
Optional: true,
|
||||
Description: "Page size",
|
||||
},
|
||||
"zone_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Zone ID",
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
|
||||
@@ -91,6 +91,7 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
|
||||
"conn_type": vnfInterface.ConnType,
|
||||
"def_gw": vnfInterface.DefGW,
|
||||
"enabled": vnfInterface.Enabled,
|
||||
"enable_secgroups": vnfInterface.EnableSecGroups,
|
||||
"flipgroup_id": vnfInterface.FLIPGroupID,
|
||||
"guid": vnfInterface.GUID,
|
||||
"ip_address": vnfInterface.IPAddress,
|
||||
@@ -106,6 +107,7 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
|
||||
"bus_number": vnfInterface.BusNumber,
|
||||
"qos": flattenQOS(vnfInterface.QOS),
|
||||
"sdn_interface_id": vnfInterface.SDNInterfaceID,
|
||||
"security_groups": vnfInterface.SecGroups,
|
||||
"target": vnfInterface.Target,
|
||||
"type": vnfInterface.Type,
|
||||
"vnfs": vnfInterface.VNFs,
|
||||
@@ -135,25 +137,26 @@ func flattenLibvirtSettings(libvirtSettings vins.LibvirtSettings) []map[string]i
|
||||
func flattenVNFDev(vnfDev vins.RecordVNFDev) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"_ckey": vnfDev.CKey,
|
||||
"account_id": vnfDev.AccountID,
|
||||
"capabilities": vnfDev.Capabilities,
|
||||
"config": flattenConfig(vnfDev.Config), //in progress
|
||||
"config_saved": vnfDev.ConfigSaved,
|
||||
"custom_pre_cfg": vnfDev.CustomPreConfig,
|
||||
"desc": vnfDev.Description,
|
||||
"gid": vnfDev.GID,
|
||||
"guid": vnfDev.GUID,
|
||||
"vnf_id": vnfDev.ID,
|
||||
"interfaces": flattenInterfaces(vnfDev.Interfaces),
|
||||
"lock_status": vnfDev.LockStatus,
|
||||
"milestones": vnfDev.Milestones,
|
||||
"vnf_name": vnfDev.Name,
|
||||
"status": vnfDev.Status,
|
||||
"tech_status": vnfDev.TechStatus,
|
||||
"type": vnfDev.Type,
|
||||
"vnc_password": vnfDev.VNCPassword,
|
||||
"vins": vnfDev.VINS,
|
||||
"_ckey": vnfDev.CKey,
|
||||
"account_id": vnfDev.AccountID,
|
||||
"capabilities": vnfDev.Capabilities,
|
||||
"config": flattenConfig(vnfDev.Config), //in progress
|
||||
"config_saved": vnfDev.ConfigSaved,
|
||||
"custom_pre_cfg": vnfDev.CustomPreConfig,
|
||||
"desc": vnfDev.Description,
|
||||
"gid": vnfDev.GID,
|
||||
"guid": vnfDev.GUID,
|
||||
"vnf_id": vnfDev.ID,
|
||||
"interfaces": flattenInterfaces(vnfDev.Interfaces),
|
||||
"live_migration_job_id": vnfDev.LiveMigrationJobID,
|
||||
"lock_status": vnfDev.LockStatus,
|
||||
"milestones": vnfDev.Milestones,
|
||||
"vnf_name": vnfDev.Name,
|
||||
"status": vnfDev.Status,
|
||||
"tech_status": vnfDev.TechStatus,
|
||||
"type": vnfDev.Type,
|
||||
"vnc_password": vnfDev.VNCPassword,
|
||||
"vins": vnfDev.VINS,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
@@ -249,6 +252,7 @@ func flattenDHCP(dhcp vins.RecordDHCP) []map[string]interface{} {
|
||||
"status": dhcp.Status,
|
||||
"tech_status": dhcp.TechStatus,
|
||||
"type": dhcp.Type,
|
||||
"zone_id": dhcp.ZoneID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
@@ -285,9 +289,11 @@ func flattenGW(gw vins.RecordGW) []map[string]interface{} {
|
||||
"owner_id": gw.OwnerID,
|
||||
"owner_type": gw.OwnerType,
|
||||
"pure_virtual": gw.PureVirtual,
|
||||
"routes": flattenStaticRoute(gw.Routes),
|
||||
"status": gw.Status,
|
||||
"tech_status": gw.TechStatus,
|
||||
"type": gw.Type,
|
||||
"zone_id": gw.ZoneID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
@@ -341,9 +347,11 @@ func flattenNAT(nat vins.RecordNAT) []map[string]interface{} {
|
||||
"owner_id": nat.OwnerID,
|
||||
"owner_type": nat.OwnerType,
|
||||
"pure_virtual": nat.PureVirtual,
|
||||
"routes": flattenStaticRoute(nat.Routes),
|
||||
"status": nat.Status,
|
||||
"tech_status": nat.TechStatus,
|
||||
"type": nat.Type,
|
||||
"zone_id": nat.ZoneID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
|
||||
@@ -88,6 +88,9 @@ func utilityVinsListCheckPresence(ctx context.Context, d *schema.ResourceData, m
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
if zoneID, ok := d.GetOk("zone_id"); ok {
|
||||
req.ZoneID = uint64(zoneID.(int))
|
||||
}
|
||||
|
||||
log.Debugf("utilityVinsListCheckPresence")
|
||||
vinsList, err := c.CloudAPI().VINS().List(ctx, req)
|
||||
|
||||
@@ -97,6 +97,55 @@ func dataSourceZoneSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"account_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"extnet_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"vins_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"lb_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"bservice_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"k8s_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
}
}

@@ -52,6 +52,13 @@ func flattenZone(d *schema.ResourceData, item *zone.RecordZone) error {
d.Set("created_time", item.CreatedTime)
d.Set("updated_time", item.UpdatedTime)
d.Set("node_ids", item.NodeIDs)
d.Set("account_ids", item.AccountIDs)
d.Set("compute_ids", item.ComputeIDs)
d.Set("extnet_ids", item.ExtnetIDs)
d.Set("vins_ids", item.VinsIDs)
d.Set("lb_ids", item.LBIDs)
d.Set("bservice_ids", item.BserviceIDs)
d.Set("k8s_ids", item.K8SIDs)

log.Debugf("flattenZone: decoded RecordZone name %q / ID %d, complete",
item.Name, item.ID)
@@ -3,16 +3,13 @@ package account
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)

func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount) {
d.Set("dc_location", acc.DCLocation)
d.Set("ckey", acc.CKey)
d.Set("acl", flattenAccAcl(acc.ACL))
d.Set("company", acc.Company)
d.Set("companyurl", acc.CompanyURL)
d.Set("compute_features", acc.ComputeFeatures)
d.Set("cpu_allocation_parameter", acc.CPUAllocationParameter)
d.Set("cpu_allocation_ratio", acc.CPUAllocationRatio)
d.Set("created_by", acc.CreatedBy)

@@ -27,6 +24,7 @@ func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount)
d.Set("account_id", acc.ID)
d.Set("account_name", acc.Name)
d.Set("resource_limits", flattenRgResourceLimits(acc.ResourceLimits))
d.Set("storage_policy", flattenSTPolicy(acc.ResourceLimits.StoragePolicies))
d.Set("resource_types", acc.ResTypes)
d.Set("send_access_emails", acc.SendAccessEmails)
d.Set("status", acc.Status)

@@ -34,12 +32,11 @@ func flattenResourceAccount(d *schema.ResourceData, acc *account.RecordAccount)
d.Set("updated_time", acc.UpdatedTime)
d.Set("version", acc.Version)
d.Set("vins", acc.VINS)
d.Set("zone_ids", acc.ZoneIDs)
d.Set("zone_ids", flattenZonesInResource(acc.ZoneIDs))
}

func flattenDataAccount(d *schema.ResourceData, acc *account.RecordAccount) {
d.Set("dc_location", acc.DCLocation)
d.Set("ckey", acc.CKey)
d.Set("acl", flattenAccAcl(acc.ACL))
d.Set("company", acc.Company)
d.Set("companyurl", acc.CompanyURL)

@@ -142,11 +139,26 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
"policies": flattenAccountPolicies(r.Policies),
}
res = append(res, temp)
return res
}

func flattenAccountPolicies(policies map[string]account.Policy) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for k, dataVal := range policies {
temp := map[string]interface{}{
"id": k,
"disk_size": dataVal.DiskSize,
"disk_size_max": dataVal.DiskSizeMax,
"seps": flattenAccountSeps(dataVal.SEPs),
}
res = append(res, temp)
}
return res
}

func flattenAccAcl(acls []account.ACLWithEmails) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(acls))
for _, acls := range acls {

@@ -172,19 +184,32 @@ func flattenAccAcl(acls []account.ACLWithEmails) []map[string]interface{} {
func flattenRgResourceLimits(rl account.ResourceLimits) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cu_c": rl.CuC,
"cu_d": rl.CuD,
"cu_dm": rl.CuDM,
"cu_i": rl.CuI,
"cu_m": rl.CuM,
"cu_np": rl.CuNP,
"gpu_units": rl.GPUUnits,
"cu_c": rl.CuC,
"cu_d": rl.CuD,
"cu_dm": rl.CuDM,
"cu_i": rl.CuI,
"cu_m": rl.CuM,
"cu_np": rl.CuNP,
"gpu_units": rl.GPUUnits,
"storage_policy": flattenSTPolicy(rl.StoragePolicies),
}
res = append(res, temp)

return res
}

func flattenSTPolicy(ast []account.StoragePolicy) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(ast))
for _, item := range ast {
temp := map[string]interface{}{
"id": item.ID,
"limit": item.Limit,
}
res = append(res, temp)
}
return res
}

func flattenRgAcl(rgAcls []account.ACL) []map[string]interface{} {
res := make([]map[string]interface{}, len(rgAcls))
for _, rgAcl := range rgAcls {

@@ -206,8 +231,6 @@ func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
for _, acc := range al.Data {
temp := map[string]interface{}{
"dc_location": acc.DCLocation,
"ckey": acc.CKey,
"meta": flattens.FlattenMeta(acc.Meta),
"acl": flattenRgAcl(acc.ACL),
"company": acc.Company,
"companyurl": acc.CompanyURL,

@@ -228,6 +251,7 @@ func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
"resource_types": acc.ResTypes,
"send_access_emails": acc.SendAccessEmails,
"status": acc.Status,
"storage_policy_ids": acc.StoragePolicyIDs,
"uniq_pools": acc.UniqPools,
"default_zone_id": acc.DefaultZoneID,
"zone_ids": acc.ZoneIDs,

@@ -245,8 +269,6 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
for _, acc := range al.Data {
temp := map[string]interface{}{
"dc_location": acc.DCLocation,
"ckey": acc.CKey,
"meta": flattens.FlattenMeta(acc.Meta),
"acl": flattenRgAcl(acc.ACL),
"company": acc.Company,
"companyurl": acc.CompanyURL,

@@ -267,6 +289,7 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
"resource_types": acc.ResTypes,
"send_access_emails": acc.SendAccessEmails,
"status": acc.Status,
"storage_policy_ids": acc.StoragePolicyIDs,
"uniq_pools": acc.UniqPools,
"default_zone_id": acc.DefaultZoneID,
"zone_ids": acc.ZoneIDs,

@@ -452,3 +475,11 @@ func flattenZones(zones []account.ZoneID) []map[string]interface{} {
}
return res
}

func flattenZonesInResource(zones []account.ZoneID) []int64 {
res := make([]int64, 0)
for _, zone := range zones {
res = append(res, zone.ID)
}
return res
}
@@ -144,6 +144,23 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}

if stPolicy, ok := d.GetOk("storage_policy"); ok {
sp := stPolicy.(*schema.Set).List()
storagePolicies := make([]account.StoragePolicy, 0)
for _, elem := range sp {
stPolicyVal := elem.(map[string]interface{})

reqStPolicy := account.StoragePolicy{
ID: uint64(stPolicyVal["id"].(int)),
Limit: stPolicyVal["limit"].(int),
}

storagePolicies = append(storagePolicies, reqStPolicy)
}
req.StoragePolicies = storagePolicies

}

if compFeaturesInterface, ok := d.GetOk("compute_features"); ok {
compFeaturesInterfaces := compFeaturesInterface.(*schema.Set).List()

@@ -372,7 +389,16 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}

if d.HasChanges("account_name", "send_access_emails", "uniq_pools", "resource_limits", "desc", "default_zone_id") {
needUpdateSP := false
if d.HasChange("storage_policy") {
needUpdate, err := utilityAccountUpdateStPolicy(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
needUpdateSP = needUpdate
}

if d.HasChanges("account_name", "send_access_emails", "uniq_pools", "resource_limits", "desc", "default_zone_id") || needUpdateSP {
if err := utilityAccountUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}

@@ -411,14 +437,18 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("zone_ids") {
old, new := d.GetChange("zone_ids")

//toAddSet := old.(*schema.Set).Difference(new.(*schema.Set))
toRemoveSet := new.(*schema.Set).Difference(old.(*schema.Set))
toRemoveSet := old.(*schema.Set).Difference(new.(*schema.Set))
toAddSet := new.(*schema.Set).Difference(old.(*schema.Set))

/*if err := utilityZoneIDsUpdate(ctx, d, m, toAddSet); err != nil {
return diag.FromErr(err)
}*/
if err := utilityZoneIDsRemove(ctx, d, m, toRemoveSet); err != nil {
return diag.FromErr(err)
if len(toAddSet.List()) > 0 {
if err := utilityZoneIDsAdd(ctx, d, m, toAddSet); err != nil {
return diag.FromErr(err)
}
}
if len(toRemoveSet.List()) > 0 {
if err := utilityZoneIDsRemove(ctx, d, m, toRemoveSet); err != nil {
return diag.FromErr(err)
}
}

}
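The zone_ids update above relies on schema.Set difference: IDs present only in the new set are added, IDs present only in the old set are removed. A minimal, SDK-independent sketch of the same idea (the function and variable names here are illustrative only, not part of the provider):

package main

import "fmt"

// diffIDs mimics what the old/new *schema.Set Difference calls compute for "zone_ids":
// IDs only in newIDs must be attached, IDs only in oldIDs must be detached.
func diffIDs(oldIDs, newIDs []int) (toAdd, toRemove []int) {
    oldSet := make(map[int]struct{}, len(oldIDs))
    for _, id := range oldIDs {
        oldSet[id] = struct{}{}
    }
    newSet := make(map[int]struct{}, len(newIDs))
    for _, id := range newIDs {
        newSet[id] = struct{}{}
    }
    for _, id := range newIDs {
        if _, ok := oldSet[id]; !ok {
            toAdd = append(toAdd, id)
        }
    }
    for _, id := range oldIDs {
        if _, ok := newSet[id]; !ok {
            toRemove = append(toRemove, id)
        }
    }
    return toAdd, toRemove
}

func main() {
    // Worked example: state had zones 1 and 2, the config now asks for 2 and 3.
    toAdd, toRemove := diffIDs([]int{1, 2}, []int{2, 3})
    fmt.Println(toAdd, toRemove) // [3] [1]
}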
@@ -39,7 +39,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Description: "if true send emails when a user is granted access to resources",
},
"zone_ids": {
Type: schema.TypeList,
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{

@@ -148,6 +148,39 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"storage_policy": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"limit": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"storage_policy": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Required: true,
},
"limit": {
Type: schema.TypeInt,
Required: true,
},
},
},
},

@@ -168,17 +201,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
// "meta": {
// Type: schema.TypeList,
// Computed: true,
// Elem: &schema.Schema{
// Type: schema.TypeString,
// },
// },
"acl": {
Type: schema.TypeList,
Computed: true,

@@ -523,17 +545,6 @@ func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,

@@ -662,6 +673,22 @@ func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"storage_policy": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"limit": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},

@@ -680,6 +707,13 @@ func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"storage_policy_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"default_zone_id": {
Type: schema.TypeInt,
Computed: true,
@@ -998,6 +1032,50 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"policies": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"ram": {
Type: schema.TypeInt,
Computed: true,

@@ -1058,6 +1136,50 @@ func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Sche
Type: schema.TypeInt,
Computed: true,
},
"policies": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"ram": {
Type: schema.TypeInt,
Computed: true,
@@ -1167,6 +1289,11 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Page size",
},
"zone_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Zone ID",
},
"items": {
Type: schema.TypeList,
Computed: true,

@@ -1176,17 +1303,6 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,

@@ -1319,6 +1435,22 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"storage_policy": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"limit": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},

@@ -1337,6 +1469,13 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"storage_policy_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,

@@ -1417,6 +1556,50 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"policies": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"ram": {
Type: schema.TypeInt,
Computed: true,

@@ -1477,6 +1660,50 @@ func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Sch
Type: schema.TypeInt,
Computed: true,
},
"policies": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"ram": {
Type: schema.TypeInt,
Computed: true,
@@ -1961,16 +2188,12 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"default_zone_id": {
Type: schema.TypeInt,
Computed: true,
},
"zone_ids": {
Type: schema.TypeList,
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{

@@ -2116,6 +2339,22 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"storage_policy": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"limit": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},

@@ -2156,6 +2395,13 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"storage_policy_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
}
}
@@ -211,7 +211,8 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)

req := account.UpdateRequest{
AccountID: accountId,
AccountID: accountId,
SendAccessEmails: d.Get("send_access_emails").(bool),
}

if d.HasChange("account_name") {

@@ -222,18 +223,14 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
req.Description = d.Get("desc").(string)
}

if d.HasChange("send_access_emails") {
req.SendAccessEmails = d.Get("send_access_emails").(bool)
}

if d.HasChange("default_zone_id") {
req.DefaultZoneID = uint64(d.Get("default_zone_id").(int))
}

if d.HasChange("uniq_pools") {
uniq_pools := d.Get("uniq_pools").([]interface{})
uniqPools := d.Get("uniq_pools").([]interface{})

for _, pool := range uniq_pools {
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}

@@ -292,6 +289,20 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
}
}

_, _, updateStPolicy := utilityGetStoragePolicyUpdate(ctx, d, m)
if len(updateStPolicy) != 0 {
storagePolicies := make([]account.StoragePolicy, 0, len(updateStPolicy))
for _, stPolicyVal := range updateStPolicy {
reqStPolicy := account.StoragePolicy{
ID: uint64(stPolicyVal["id"].(int)),
Limit: stPolicyVal["limit"].(int),
}

storagePolicies = append(storagePolicies, reqStPolicy)
}
req.StoragePolicies = storagePolicies
}

_, err := c.CloudBroker().Account().Update(ctx, req)
if err != nil {
return err

@@ -300,6 +311,95 @@ func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interfa
return nil
}

func utilityAccountUpdateStPolicy(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)

accountId, _ := strconv.ParseUint(d.Id(), 10, 64)

addSP, delSP, updateSP := utilityGetStoragePolicyUpdate(ctx, d, m)
needUpdate := len(updateSP) > 0

if len(addSP) > 0 {
for _, spMap := range addSP {
req := account.AddStoragePolicyRequest{
AccountID: accountId,
StoragePolicyID: uint64(spMap["id"].(int)),
Limit: spMap["limit"].(int),
}
log.Debugf("utilityAccountUpdateStPolicy: starting to add storage policy ID:%d for account %d", req.StoragePolicyID, req.AccountID)
_, err := c.CloudBroker().Account().AddStoragePolicy(ctx, req)
if err != nil {
return needUpdate, err
}
}
}

if len(delSP) > 0 {
for _, spMap := range delSP {
req := account.DelStoragePolicyRequest{
AccountID: accountId,
StoragePolicyID: uint64(spMap["id"].(int)),
}
log.Debugf("utilityAccountUpdateStPolicy: starting to delete storage policy ID:%d from account %d", req.StoragePolicyID, req.AccountID)
_, err := c.CloudBroker().Account().DelStoragePolicy(ctx, req)
if err != nil {
return needUpdate, err
}
}
}

return needUpdate, nil
}

func utilityGetStoragePolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) (added, deleted, updated []map[string]interface{}) {
added = make([]map[string]interface{}, 0)
deleted = make([]map[string]interface{}, 0)
updated = make([]map[string]interface{}, 0)
oldSet, newSet := d.GetChange("storage_policy")
oldList := oldSet.(*schema.Set).List()
newList := newSet.(*schema.Set).List()

for _, oldSP := range oldList {
oldMap := oldSP.(map[string]interface{})
found := false
for _, newSP := range newList {
newMap := newSP.(map[string]interface{})
if oldMap["id"] == newMap["id"] {
found = true
if oldMap["limit"] != newMap["limit"] {
updated = append(updated, newMap)
}
break
}
if found {
break
}
}
if found {
continue
}
deleted = append(deleted, oldMap)
}

for _, newSP := range newList {
newMap := newSP.(map[string]interface{})
found := false
for _, oldSP := range oldList {
oldMap := oldSP.(map[string]interface{})
if oldMap["id"] == newMap["id"] {
found = true
break
}
}
if found {
continue
}
added = append(added, newMap)
}

return
}
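utilityGetStoragePolicyUpdate classifies the old and new storage_policy sets by ID: entries present only in the new set are additions, entries present only in the old set are deletions, and entries whose ID matches but whose limit changed are updates. A standalone sketch of that classification with a concrete worked example (the policy type and the values below are illustrative, not the SDK's types):

package main

import "fmt"

// policy mirrors the id/limit pair carried by each "storage_policy" block.
type policy struct {
    ID    int
    Limit int
}

// classify splits oldSP/newSP by ID the same way the update helper does.
func classify(oldSP, newSP []policy) (added, deleted, updated []policy) {
    oldByID := make(map[int]policy, len(oldSP))
    for _, p := range oldSP {
        oldByID[p.ID] = p
    }
    newByID := make(map[int]policy, len(newSP))
    for _, p := range newSP {
        newByID[p.ID] = p
    }
    for _, p := range newSP {
        prev, ok := oldByID[p.ID]
        switch {
        case !ok:
            added = append(added, p) // present only in the new set
        case prev.Limit != p.Limit:
            updated = append(updated, p) // same ID, changed limit
        }
    }
    for _, p := range oldSP {
        if _, ok := newByID[p.ID]; !ok {
            deleted = append(deleted, p) // present only in the old set
        }
    }
    return added, deleted, updated
}

func main() {
    oldSP := []policy{{ID: 1, Limit: 10}, {ID: 2, Limit: 20}}
    newSP := []policy{{ID: 2, Limit: 25}, {ID: 3, Limit: 30}}
    fmt.Println(classify(oldSP, newSP)) // [{3 30}] [{1 10}] [{2 25}]
}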
func utilityAccountAvailiableTemplatesUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, afterCreate bool) error {
c := m.(*controller.ControllerCfg)

@@ -72,6 +72,10 @@ func utilityAccountListCheckPresence(ctx context.Context, d *schema.ResourceData
req.Status = status.(string)
}

if zoneID, ok := d.GetOk("zone_id"); ok {
req.ZoneID = uint64(zoneID.(int))
}

log.Debugf("utilityAccountListCheckPresence: load account list")
accountList, err := c.CloudBroker().Account().List(ctx, req)
if err != nil {
@@ -43,6 +43,7 @@ func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {

d.Set("args", au.Arguments)
d.Set("call", au.Call)
d.Set("correlation_id", au.CorrelationID)
d.Set("guid", au.GUID)
d.Set("kwargs", au.Kwargs)
d.Set("remote_addr", au.RemoteAddr)

@@ -60,19 +61,19 @@ func flattenAuditList(au *audit.ListAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(au.Data))
for _, item := range au.Data {
temp := map[string]interface{}{
"args": item.Args,
"call": item.Call,
"guid": item.GUID,
"kwargs": item.Kwargs,
"remote_addr": item.RemoteAddr,
"result": item.Result,
"responsetime": item.ResponseTime,
"status_code": item.StatusCode,
"tags": item.Tags,
"timestamp": item.Timestamp,
"timestamp_end": item.TimestampEnd,
"ttl": item.TTL,
"user": item.User,
"args": item.Args,
"call": item.Call,
"correlation_id": item.CorrelationID,
"guid": item.GUID,
"kwargs": item.Kwargs,
"remote_addr": item.RemoteAddr,
"result": item.Result,
"responsetime": item.ResponseTime,
"status_code": item.StatusCode,
"timestamp": item.Timestamp,
"timestamp_end": item.TimestampEnd,
"ttl": item.TTL,
"user": item.User,
}
res = append(res, temp)
}

@@ -18,6 +18,10 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"correlation_id": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,

@@ -42,10 +46,11 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"tags": {
Type: schema.TypeString,
Computed: true,
},
//TODO
//"tags": {
// Type: schema.TypeString,
// Computed: true,
//},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,

@@ -117,6 +122,50 @@ func dataSourceAuditListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"resgroup_id": {
Type: schema.TypeInt,
Optional: true,
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
},
"service_id": {
Type: schema.TypeInt,
Optional: true,
},
"k8s_id": {
Type: schema.TypeInt,
Optional: true,
},
"flipgroup_id": {
Type: schema.TypeInt,
Optional: true,
},
"lb_id": {
Type: schema.TypeInt,
Optional: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
},
"node_id": {
Type: schema.TypeInt,
Optional: true,
},
"exclude_audit_lines": {
Type: schema.TypeBool,
Optional: true,
},
"items": {
Type: schema.TypeList,
Computed: true,

@@ -126,6 +175,10 @@ func dataSourceAuditListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"correlation_id": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,

@@ -170,10 +223,6 @@ func dataSourceAuditListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"tags": {
Type: schema.TypeString,
Computed: true,
},
},
},
},

@@ -76,6 +76,39 @@ func utilityAuditListCheckPresence(ctx context.Context, d *schema.ResourceData,
if Size, ok := d.GetOk("size"); ok {
req.Size = uint64(Size.(int))
}
if resgroupID, ok := d.GetOk("resgroup_id"); ok {
req.RGID = uint64(resgroupID.(int))
}
if computeID, ok := d.GetOk("compute_id"); ok {
req.ComputeID = uint64(computeID.(int))
}
if accountID, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountID.(int))
}
if vinsID, ok := d.GetOk("vins_id"); ok {
req.VINSID = uint64(vinsID.(int))
}
if serviceID, ok := d.GetOk("service_id"); ok {
req.ServiceID = uint64(serviceID.(int))
}
if k8sID, ok := d.GetOk("k8s_id"); ok {
req.K8SID = uint64(k8sID.(int))
}
if flipgroupID, ok := d.GetOk("flipgroup_id"); ok {
req.FLIPGroupID = uint64(flipgroupID.(int))
}
if lbID, ok := d.GetOk("lb_id"); ok {
req.LBID = uint64(lbID.(int))
}
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
if nodeID, ok := d.GetOk("node_id"); ok {
req.NodeID = uint64(nodeID.(int))
}
if excludeAuditLines, ok := d.GetOk("exclude_audit_lines"); ok {
req.ExcludeAuditLines = excludeAuditLines.(bool)
}

log.Debugf("utilityAuditListCheckPresence: load audit list")
auditList, err := c.CloudBroker().Audit().List(ctx, req)
@@ -58,11 +58,13 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("storage_policy_id", disk.StoragePolicyID)
d.Set("tech_status", disk.TechStatus)
d.Set("type", disk.Type)
d.Set("vmid", disk.VMID)
d.Set("updated_by", disk.UpdatedBy)
d.Set("updated_time", disk.UpdatedTime)
d.Set("to_clean", disk.ToClean)
}

func flattenDiskReplica(d *schema.ResourceData, disk *disks.RecordDisk, statusReplication string) {

@@ -224,11 +226,13 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
"size_used": disk.SizeUsed,
"snapshots": flattendDiskSnapshotList(disk.Snapshots),
"status": disk.Status,
"storage_policy_id": disk.StoragePolicyID,
"tech_status": disk.TechStatus,
"type": disk.Type,
"vmid": disk.VMID,
"updated_by": disk.UpdatedBy,
"updated_time": disk.UpdatedTime,
"to_clean": disk.ToClean,
}
res = append(res, temp)
}

@@ -57,19 +57,16 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
}

req := disks.CreateRequest{
AccountID: uint64(d.Get("account_id").(int)),
Name: d.Get("disk_name").(string),
Size: uint64(d.Get("size_max").(int)),
AccountID: uint64(d.Get("account_id").(int)),
StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
Name: d.Get("disk_name").(string),
Size: uint64(d.Get("size_max").(int)),
}

if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}

if iops, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(iops.(int))
}

if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}

@@ -202,6 +199,12 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
}

if d.HasChange("storage_policy_id") {
if err := resourceDiskChangeStoragePolicyID(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}

if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max")
if oldSize.(int) > newSize.(int) {

@@ -297,8 +300,6 @@ func resourceDiskChangeIotune(ctx context.Context, d *schema.ResourceData, m int

if _, ok := iotune["total_iops_sec"]; ok {
req.IOPS = uint64(iotune["total_iops_sec"].(int))
} else if _, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(d.Get("iops").(int))
}

_, err := c.CloudBroker().Disks().LimitIO(ctx, req)

@@ -351,6 +352,16 @@ func resourceDiskChangeSize(ctx context.Context, d *schema.ResourceData, m inter
return err
}

func resourceDiskChangeStoragePolicyID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)

_, err := c.CloudBroker().Disks().ChangeDiskStoragePolicy(ctx, disks.ChangeDiskStoragePolicyRequest{
DiskID: uint64(d.Get("disk_id").(int)),
StoragePolicyID: uint64(d.Get("storage_policy_id").(int)),
})
return err
}

func resourceDiskChangeNodes(ctx context.Context, d *schema.ResourceData, m interface{}, afterCreate bool) error {
c := m.(*controller.ControllerCfg)
diskID := uint64(d.Get("disk_id").(int))

@@ -333,6 +333,11 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"storage_policy_id": {
Type: schema.TypeFloat,
Computed: true,
Description: "Storage policy ID",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,

@@ -353,6 +358,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"to_clean": {
Type: schema.TypeBool,
Computed: true,
},
}

return rets

@@ -425,6 +434,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Page size",
},
"storage_policy_id": {
Type: schema.TypeInt,
Optional: true,
Description: "storage policy ID ",
},
"items": {
Type: schema.TypeList,
Computed: true,

@@ -757,6 +771,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"storage_policy_id": {
Type: schema.TypeFloat,
Computed: true,
Description: "Storage policy ID",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,

@@ -777,6 +796,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"to_clean": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},

@@ -1172,6 +1195,11 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"storage_policy_id": {
Type: schema.TypeFloat,
Computed: true,
Description: "Storage policy ID",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,

@@ -1192,6 +1220,10 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"to_clean": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},

@@ -1358,6 +1390,11 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Page size",
},
"storage_policy_id": {
Type: schema.TypeInt,
Optional: true,
Description: "storage policy ID ",
},
"items": {
Type: schema.TypeList,
Computed: true,

@@ -1863,6 +1900,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Required: true,
//ForceNew: true,
},
"storage_policy_id": {
Type: schema.TypeInt,
Required: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,

@@ -1889,11 +1930,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"iops": {
Type: schema.TypeInt,
Optional: true,
Description: "max IOPS disk can perform",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,

@@ -2252,6 +2288,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"to_clean": {
Type: schema.TypeBool,
Computed: true,
},
}

return rets

@@ -85,6 +85,9 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if storagePolicyID, ok := d.GetOk("storage_policy_id"); ok {
req.StoragePolicyID = uint64(storagePolicyID.(int))
}

log.Debugf("utilityDiskListCheckPresence: load disk list")
diskList, err := c.CloudBroker().Disks().List(ctx, req)

@@ -46,6 +46,9 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if storagePolicyID, ok := d.GetOk("storage_policy_id"); ok {
req.StoragePolicyID = uint64(storagePolicyID.(int))
}

log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
unattachedList, err := c.CloudBroker().Disks().ListUnattached(ctx, req)
@@ -19,24 +19,26 @@ func flattenDPDKNet(d *schema.ResourceData, dpdk *dpdk.RecordDPDKNet) {
d.Set("vlan_id", dpdk.VlanID)
d.Set("compute_ids", dpdk.ComputeIDs)
d.Set("updated_time", dpdk.UpdatedTime)
d.Set("enable_secgroups", dpdk.EnableSecGroups)
}

func flattenDPDKNetList(list *dpdk.ListDPDKNet) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list.Data))
for _, dpdk := range list.Data {
temp := map[string]interface{}{
"dpdk_id": dpdk.ID,
"account_access": dpdk.AccountAccess,
"desc": dpdk.Description,
"gid": dpdk.GID,
"guid": dpdk.GUID,
"name": dpdk.Name,
"rg_access": dpdk.RGAccess,
"status": dpdk.Status,
"ovs_bridge": dpdk.OVSBridge,
"vlan_id": dpdk.VlanID,
"compute_ids": dpdk.ComputeIDs,
"updated_time": dpdk.UpdatedTime,
"dpdk_id": dpdk.ID,
"account_access": dpdk.AccountAccess,
"desc": dpdk.Description,
"enable_secgroups": dpdk.EnableSecGroups,
"gid": dpdk.GID,
"guid": dpdk.GUID,
"name": dpdk.Name,
"rg_access": dpdk.RGAccess,
"status": dpdk.Status,
"ovs_bridge": dpdk.OVSBridge,
"vlan_id": dpdk.VlanID,
"compute_ids": dpdk.ComputeIDs,
"updated_time": dpdk.UpdatedTime,
}
res = append(res, temp)
}

@@ -93,6 +93,17 @@ func resourceDPDKNetCreate(ctx context.Context, d *schema.ResourceData, m interf
warnings.Add(err)
}

if d.Get("enable_secgroups").(bool) {
req := dpdk.UpdateRequest{
DPDKID: dpdkID,
EnableSecGroups: true,
}
_, err := c.CloudBroker().DPDKNet().Update(ctx, req)
if err != nil {
warnings.Add(err)
}
}

return append(warnings.Get(), resourceDPDKNetRead(ctx, d, m)...)
}

@@ -167,7 +178,7 @@ func resourceDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}

if d.HasChanges("name", "desc", "vlan_id", "ovs_bridge", "account_access", "rg_access") {
if d.HasChanges("name", "desc", "vlan_id", "ovs_bridge", "account_access", "rg_access", "enable_secgroups") {
if err := utilityDPDKNetUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}

@@ -161,6 +161,10 @@ func dataSourceDPDKNetListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Description of DPDK network",
},
"enable_secgroups": {
Type: schema.TypeBool,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,

@@ -275,6 +279,12 @@ func resourceDPDKNetSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Enabled or disabled DPDK network",
},
"enable_secgroups": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "enable security groups",
},
"guid": {
Type: schema.TypeInt,
Computed: true,

@@ -148,6 +148,10 @@ func utilityDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interfa
}
}

if d.HasChange("enable_secgroups") {
req.EnableSecGroups = d.Get("enable_secgroups").(bool)
}

if _, err := c.CloudBroker().DPDKNet().Update(ctx, req); err != nil {
return err
}
@@ -51,11 +51,12 @@ func flattenListExtnet(extList *extnet.ListExtNet) []map[string]interface{} {
"free_ips": item.FreeIPs,
"gid": item.GID,
"guid": item.GUID,
"enable_secgroups": item.EnableSecGroups,
"extnet_id": item.ID,
"ipcidr": item.IPCIDR,
"milestones": item.Milestones,
"name": item.Name,
"network_ids": item.NetworkIDs,
"network_ids": flattenNetworkIDs(item.NetworkIDs),
"ovs_bridge": item.OVSBridge,
"pre_reservations_num": item.PreReservationsNum,
"pri_vnfdev_id": item.PriVNFDevID,

@@ -143,6 +144,7 @@ func flattenRecordExtnetResource(d *schema.ResourceData, recNet *extnet.RecordEx
d.Set("reservations", flattenExtnetReservations(recNet.Reservations))
d.Set("routes", flattenStaticRouteList(staticRouteList))
d.Set("zone_id", recNet.ZoneID)
d.Set("enable_secgroups", recNet.EnableSecGroups)
}

func flattenExtnetExcluded(ers extnet.ListReservations) []map[string]interface{} {

@@ -133,7 +133,7 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
}

if mtu, ok := d.GetOk("mtu"); ok {
req.MTU = mtu.(uint)
req.MTU = uint(mtu.(int))
}

log.Debugf("cloudbroker: Sent create request")

@@ -234,6 +234,18 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
}
}

if d.Get("enable_secgroups").(bool) {
log.Debugf("resourceExtnetCreate: trying to enable secgroups for extnet with ID %d", netID)
req := extnet.UpdateRequest{
NetID: netID,
EnableSecGroups: true,
}
_, err := c.CloudBroker().ExtNet().Update(ctx, req)
if err != nil {
w.Add(err)
}
}

return resourceExtnetRead(ctx, d, m)
}

@@ -65,6 +65,11 @@ func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Page size",
},
"zone_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Zone ID",
},
"items": {
Type: schema.TypeList,
Computed: true,

@@ -130,6 +135,10 @@ func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"enable_secgroups": {
Type: schema.TypeBool,
Computed: true,
},
"extnet_id": {
Type: schema.TypeInt,
Computed: true,

@@ -700,6 +709,12 @@ func resourceExtnetSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"enable_secgroups": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "enable security groups",
},
"dns": {
Type: schema.TypeList,
Optional: true,

@@ -86,6 +86,10 @@ func utilityExtnetListCheckPresence(ctx context.Context, d *schema.ResourceData,
req.Size = uint64(size.(int))
}

if zoneID, ok := d.GetOk("zone_id"); ok {
req.ZoneID = uint64(zoneID.(int))
}

res, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return nil, err