From af82decadd57ba228f5f47712cd9b1aee4af20b6 Mon Sep 17 00:00:00 2001 From: KasimBaybikov Date: Fri, 10 Mar 2023 12:42:15 +0300 Subject: [PATCH] 3.5.0 --- CHANGELOG.md | 63 +- .../provider/cloudapi/data_sources_map.go | 17 + internal/service/cloudapi/kvmvm/api.go | 64 +- .../cloudapi/kvmvm/data_source_compute.go | 1118 ++++++++++----- .../kvmvm/data_source_compute_audits.go | 74 + .../kvmvm/data_source_compute_get_audits.go | 62 + .../data_source_compute_get_console_url.go | 52 + .../kvmvm/data_source_compute_get_log.go | 53 + .../kvmvm/data_source_compute_list.go | 335 +++++ .../kvmvm/data_source_compute_pfw_list.go | 81 ++ .../kvmvm/data_source_compute_user_list.go | 45 + internal/service/cloudapi/kvmvm/flattens.go | 613 +++++++- internal/service/cloudapi/kvmvm/models.go | 887 +++++++++++- .../cloudapi/kvmvm/osusers_subresource.go | 10 +- .../kvmvm/resource_check_input_values.go | 106 ++ .../cloudapi/kvmvm/resource_compute.go | 1250 +++++++++++++---- .../service/cloudapi/kvmvm/utility_compute.go | 87 +- .../cloudapi/kvmvm/utility_compute_audits.go | 29 + .../kvmvm/utility_compute_boot_disk.go | 23 + .../kvmvm/utility_compute_get_audits.go | 29 + .../kvmvm/utility_compute_get_console_url.go | 23 + .../cloudapi/kvmvm/utility_compute_get_log.go | 24 + .../cloudapi/kvmvm/utility_compute_list.go | 39 + .../kvmvm/utility_compute_pfw_list.go | 30 + .../kvmvm/utility_compute_user_list.go | 28 + .../kvmvm/utility_data_source_compute.go | 29 + internal/service/cloudapi/rg/api.go | 29 +- .../service/cloudapi/rg/data_source_rg.go | 563 +++++--- .../data_source_rg_affinity_group_computes.go | 108 ++ .../rg/data_source_rg_affinity_groups_get.go | 60 + .../rg/data_source_rg_affinity_groups_list.go | 67 + .../cloudapi/rg/data_source_rg_audits.go | 77 + .../cloudapi/rg/data_source_rg_list.go | 147 +- .../rg/data_source_rg_list_computes.go | 193 +++ .../rg/data_source_rg_list_deleted.go | 196 +++ .../cloudapi/rg/data_source_rg_list_lb.go | 357 +++++ .../cloudapi/rg/data_source_rg_list_pfw.go | 89 ++ .../cloudapi/rg/data_source_rg_list_vins.go | 126 ++ .../cloudapi/rg/data_source_rg_usage.go | 99 ++ internal/service/cloudapi/rg/flattens.go | 528 ++++++- internal/service/cloudapi/rg/models.go | 796 +++++++++-- .../service/cloudapi/rg/quota_subresource.go | 39 +- .../rg/resource_check_input_values.go | 111 ++ internal/service/cloudapi/rg/resource_rg.go | 766 +++++++--- internal/service/cloudapi/rg/utility_rg.go | 30 +- .../rg/utility_rg_affinity_group_computes.go | 32 + .../rg/utility_rg_affinity_groups_get.go | 33 + .../rg/utility_rg_affinity_groups_list.go | 32 + .../service/cloudapi/rg/utility_rg_audits.go | 30 + .../service/cloudapi/rg/utility_rg_list.go | 4 +- .../cloudapi/rg/utility_rg_list_computes.go | 35 + .../cloudapi/rg/utility_rg_list_deleted.go | 37 + .../service/cloudapi/rg/utility_rg_list_lb.go | 32 + .../cloudapi/rg/utility_rg_list_pfw.go | 32 + .../cloudapi/rg/utility_rg_list_vins.go | 36 + .../service/cloudapi/rg/utility_rg_usage.go | 35 + .../service/cloudapi/vins/resource_vins.go | 1 - samples/cloudapi/data_kvmvm/main.tf | 19 +- samples/cloudapi/data_kvmvm_audits/main.tf | 36 + .../cloudapi/data_kvmvm_get_audits/main.tf | 36 + .../data_kvmvm_get_console_url/main.tf | 36 + samples/cloudapi/data_kvmvm_get_log/main.tf | 41 + samples/cloudapi/data_kvmvm_list/main.tf | 50 + samples/cloudapi/data_kvmvm_pfw_list/main.tf | 36 + samples/cloudapi/data_kvmvm_user_list/main.tf | 36 + samples/cloudapi/data_rg/main.tf | 42 + .../data_rg_affinity_group_computes/main.tf | 40 + 
.../data_rg_affinity_groups_get/main.tf | 41 + .../data_rg_affinity_groups_list/main.tf | 36 + samples/cloudapi/data_rg_audits/main.tf | 36 + samples/cloudapi/data_rg_list/main.tf | 45 +- .../cloudapi/data_rg_list_computes/main.tf | 40 + samples/cloudapi/data_rg_list_deleted/main.tf | 41 + samples/cloudapi/data_rg_list_lb/main.tf | 36 + samples/cloudapi/data_rg_list_pfw/main.tf | 36 + samples/cloudapi/data_rg_list_vins/main.tf | 40 + samples/cloudapi/data_rg_usage/main.tf | 40 + samples/cloudapi/resource_kvmvm/main.tf | 217 ++- samples/cloudapi/resource_rg/main.tf | 170 +++ 79 files changed, 9448 insertions(+), 1593 deletions(-) create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_audits.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/resource_check_input_values.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_boot_disk.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_get_audits.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/utility_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/utility_data_source_compute.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_vins.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/resource_check_input_values.go create mode 100644 internal/service/cloudapi/rg/utility_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/utility_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/utility_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/utility_rg_audits.go create mode 100644 internal/service/cloudapi/rg/utility_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/utility_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/utility_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/utility_rg_list_pfw.go create mode 100644 
internal/service/cloudapi/rg/utility_rg_list_vins.go
 create mode 100644 internal/service/cloudapi/rg/utility_rg_usage.go
 create mode 100644 samples/cloudapi/data_kvmvm_audits/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_get_audits/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_get_console_url/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_get_log/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_list/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_pfw_list/main.tf
 create mode 100644 samples/cloudapi/data_kvmvm_user_list/main.tf
 create mode 100644 samples/cloudapi/data_rg/main.tf
 create mode 100644 samples/cloudapi/data_rg_affinity_group_computes/main.tf
 create mode 100644 samples/cloudapi/data_rg_affinity_groups_get/main.tf
 create mode 100644 samples/cloudapi/data_rg_affinity_groups_list/main.tf
 create mode 100644 samples/cloudapi/data_rg_audits/main.tf
 create mode 100644 samples/cloudapi/data_rg_list_computes/main.tf
 create mode 100644 samples/cloudapi/data_rg_list_deleted/main.tf
 create mode 100644 samples/cloudapi/data_rg_list_lb/main.tf
 create mode 100644 samples/cloudapi/data_rg_list_pfw/main.tf
 create mode 100644 samples/cloudapi/data_rg_list_vins/main.tf
 create mode 100644 samples/cloudapi/data_rg_usage/main.tf
 create mode 100644 samples/cloudapi/resource_rg/main.tf
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9fe835b..99bf07b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,56 @@
-### Version 3.4.3
+### Version 3.5.0
-### Features
+## Features
-- Change field type disksize from int to float in:
- - resource decort_resgroup
- - resource decort_account
- - data source decort_rg
- - data source decort_account
- - data source decort_account_rg_list
-- Models of the resources
+#### Resgroup
+- Add data source rg_affinity_group_computes
+- Add data source rg_affinity_groups_get
+- Add data source rg_affinity_groups_list
+- Add data source rg_audits
+- Add data source rg_list
+- Add data source rg_list_computes
+- Add data source rg_list_deleted
+- Add data source rg_list_lb
+- Add data source rg_list_pfw
+- Add data source rg_list_vins
+- Add data source rg_usage
+- Update data source rg
+- Update block 'quota' to change resource limits
+- Add block 'access' to grant/revoke rights for rg
+- Add block 'def_net' to set the default network in rg
+- Add field 'enable' to enable/disable rg
+- Add processing of input parameters (account_id, gid, ext_net_id) when creating and updating a resource
+
+#### Kvmvm
+- Update data source decort_kvmvm
+- Add data source decort_kvmvm_list
+- Add data source decort_kvmvm_audits
+- Add data source decort_kvmvm_get_audits
+- Add data source decort_kvmvm_get_console_url
+- Add data source decort_kvmvm_get_log
+- Add data source decort_kvmvm_pfw_list
+- Add data source decort_kvmvm_user_list
+- Update block 'disks' in the resource decort_kvmvm
+- Add block 'tags' to add/delete tags
+- Add block 'port_forwarding' to add/delete port forwarding rules
+- Add block 'user_access' to grant/revoke user rights for compute
+- Add block 'snapshot' to create/delete snapshots
+- Add block 'rollback' to roll back to a snapshot
+- Add block 'cd' to insert/eject CD-ROM disks
+- Add field 'pin_to_stack' to pin compute to stack
+- Add field 'pause' to pause/resume compute
+- Add field 'reset' to reset compute
+- Add the ability to redeploy the compute when changing the image_id
+- Add field 'data_disks' to redeploy compute
+- Add field 'auto_start' to redeploy compute
+- Add field 'force_stop' to redeploy compute
+- Add warnings when creating the resource decort_kvmvm
+- Add processing of input parameters (rg_id, image_id and all vins_id values in 'network' blocks) when creating and updating a resource
+
+## Bug Fixes
+
+- When deleting the 'quota' block, the limits are not set to the default value
+- Block 'disks' in resource decort_kvmvm breaks the state
+- Import decort_resgroup resource breaks the state
+- Import decort_kvmvm resource breaks the state
+- If the boot_disk_size is not specified at creation, further changing it leads to an error
diff --git a/internal/provider/cloudapi/data_sources_map.go b/internal/provider/cloudapi/data_sources_map.go
index c083b6a..c4d00d4 100644
--- a/internal/provider/cloudapi/data_sources_map.go
+++ b/internal/provider/cloudapi/data_sources_map.go
@@ -40,6 +40,13 @@ func NewDataSourcesMap() map[string]*schema.Resource {
 "decort_account": account.DataSourceAccount(),
 "decort_resgroup": rg.DataSourceResgroup(),
 "decort_kvmvm": kvmvm.DataSourceCompute(),
+ "decort_kvmvm_list": kvmvm.DataSourceComputeList(),
+ "decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(),
+ "decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(),
+ "decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(),
+ "decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(),
+ "decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(),
+ "decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(),
 "decort_k8s": k8s.DataSourceK8s(),
 "decort_k8s_list": k8s.DataSourceK8sList(),
 "decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
@@ -56,6 +63,16 @@ func NewDataSourcesMap() map[string]*schema.Resource {
 "decort_disk": disks.DataSourceDisk(),
 "decort_disk_list": disks.DataSourceDiskList(),
 "decort_rg_list": rg.DataSourceRgList(),
+ "decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(),
+ "decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(),
+ "decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(),
+ "decort_rg_audits": rg.DataSourceRgAudits(),
+ "decort_rg_list_computes": rg.DataSourceRgListComputes(),
+ "decort_rg_list_deleted": rg.DataSourceRgListDeleted(),
+ "decort_rg_list_lb": rg.DataSourceRgListLb(),
+ "decort_rg_list_pfw": rg.DataSourceRgListPfw(),
+ "decort_rg_list_vins": rg.DataSourceRgListVins(),
+ "decort_rg_usage": rg.DataSourceRgUsage(),
 "decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(),
 "decort_disk_list_types": disks.DataSourceDiskListTypes(),
 "decort_disk_list_deleted": disks.DataSourceDiskListDeleted(),
diff --git a/internal/service/cloudapi/kvmvm/api.go b/internal/service/cloudapi/kvmvm/api.go
index 2096cce..238a17c 100644
--- a/internal/service/cloudapi/kvmvm/api.go
+++ b/internal/service/cloudapi/kvmvm/api.go
@@ -33,27 +33,25 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 package kvmvm
 const (
- KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create"
- KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create"
- ComputeGetAPI = "/restmachine/cloudapi/compute/get"
- RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
- ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach"
- ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach"
- ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach"
- ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach"
- ComputeStartAPI = "/restmachine/cloudapi/compute/start"
- ComputeStopAPI = "/restmachine/cloudapi/compute/stop"
- ComputeResizeAPI = "/restmachine/cloudapi/compute/resize"
- DisksResizeAPI = "/restmachine/cloudapi/disks/resize2"
- ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
- ComputeUpdateAPI = "/restmachine/cloudapi/compute/update"
- ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd"
- ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel"
- ComputeRestoreAPI = "/restmachine/cloudapi/compute/restore"
- ComputeEnableAPI = "/restmachine/cloudapi/compute/enable"
- ComputeDisableAPI = "/restmachine/cloudapi/compute/disable"
-
- //affinity and anti-affinity
+ KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create"
+ KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create"
+ ComputeGetAPI = "/restmachine/cloudapi/compute/get"
+ RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
+ ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach"
+ ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach"
+ ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach"
+ ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach"
+ ComputeStartAPI = "/restmachine/cloudapi/compute/start"
+ ComputeStopAPI = "/restmachine/cloudapi/compute/stop"
+ ComputeResizeAPI = "/restmachine/cloudapi/compute/resize"
+ DisksResizeAPI = "/restmachine/cloudapi/disks/resize2"
+ ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
+ ComputeUpdateAPI = "/restmachine/cloudapi/compute/update"
+ ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd"
+ ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel"
+ ComputeRestoreAPI = "/restmachine/cloudapi/compute/restore"
+ ComputeEnableAPI = "/restmachine/cloudapi/compute/enable"
+ ComputeDisableAPI = "/restmachine/cloudapi/compute/disable"
 ComputeAffinityLabelSetAPI = "/restmachine/cloudapi/compute/affinityLabelSet"
 ComputeAffinityLabelRemoveAPI = "/restmachine/cloudapi/compute/affinityLabelRemove"
 ComputeAffinityRuleAddAPI = "/restmachine/cloudapi/compute/affinityRuleAdd"
@@ -62,4 +60,28 @@ const (
 ComputeAntiAffinityRuleAddAPI = "/restmachine/cloudapi/compute/antiAffinityRuleAdd"
 ComputeAntiAffinityRuleRemoveAPI = "/restmachine/cloudapi/compute/antiAffinityRuleRemove"
 ComputeAntiAffinityRulesClearAPI = "/restmachine/cloudapi/compute/antiAffinityRulesClear"
+ ComputeListAPI = "/restmachine/cloudapi/compute/list"
+ ComputeAuditsAPI = "/restmachine/cloudapi/compute/audits"
+ ComputeGetAuditsAPI = "/restmachine/cloudapi/compute/getAudits"
+ ComputeGetConsoleUrlAPI = "/restmachine/cloudapi/compute/getConsoleUrl"
+ ComputeGetLogAPI = "/restmachine/cloudapi/compute/getLog"
+ ComputePfwListAPI = "/restmachine/cloudapi/compute/pfwList"
+ ComputeUserListAPI = "/restmachine/cloudapi/compute/userList"
+ ComputeTagAddAPI = "/restmachine/cloudapi/compute/tagAdd"
+ ComputeTagRemoveAPI = "/restmachine/cloudapi/compute/tagRemove"
+ ComputePinToStackAPI = "/restmachine/cloudapi/compute/pinToStack"
+ ComputeUnpinFromStackAPI = "/restmachine/cloudapi/compute/unpinFromStack"
+ ComputePfwAddAPI = "/restmachine/cloudapi/compute/pfwAdd"
+ ComputePfwDelAPI = "/restmachine/cloudapi/compute/pfwDel"
+ ComputeUserGrantAPI = "/restmachine/cloudapi/compute/userGrant"
+ ComputeUserRevokeAPI = "/restmachine/cloudapi/compute/userRevoke"
+ ComputeSnapshotCreateAPI = "/restmachine/cloudapi/compute/snapshotCreate"
+ ComputeSnapshotDeleteAPI = "/restmachine/cloudapi/compute/snapshotDelete"
+ ComputeSnapshotRollbackAPI = "/restmachine/cloudapi/compute/snapshotRollback"
+ ComputePauseAPI = "/restmachine/cloudapi/compute/pause"
+ ComputeResumeAPI = "/restmachine/cloudapi/compute/resume"
+ ComputeCdInsertAPI =
"/restmachine/cloudapi/compute/cdInsert" + ComputeCdEjectAPI = "/restmachine/cloudapi/compute/cdEject" + ComputeResetAPI = "/restmachine/cloudapi/compute/reset" + ComputeRedeployAPI = "/restmachine/cloudapi/compute/redeploy" ) diff --git a/internal/service/cloudapi/kvmvm/data_source_compute.go b/internal/service/cloudapi/kvmvm/data_source_compute.go index 1fef103..8a18c1a 100644 --- a/internal/service/cloudapi/kvmvm/data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/data_source_compute.go @@ -34,87 +34,14 @@ package kvmvm import ( "context" - "encoding/json" - "fmt" - - // "net/url" + "strconv" "github.com/rudecs/terraform-provider-decort/internal/constants" - "github.com/rudecs/terraform-provider-decort/internal/status" - log "github.com/sirupsen/logrus" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) -// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute -// Extra disks are all compute disks but a boot disk. -func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} { - // this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema, - // which is a simple list of integer disk IDs excluding boot disk ID - length := len(disks) - log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length) - - if length == 0 || (length == 1 && disks[0].Type == "B") { - // the disk list is empty (which is kind of strange - diskless compute?), or - // there is only one disk in the list and it is a boot disk; - // as we skip boot disks, the result will be of 0 length anyway - return make([]interface{}, 0) - } - - result := make([]interface{}, length-1) - idx := 0 - for _, value := range disks { - if value.Type == "B" { - // skip boot disk when iterating over the list of disks - continue - } - - result[idx] = value.ID - idx++ - } - - return result -} - -func findBootDisk(disks []DiskRecord) *DiskRecord { - for _, d := range disks { - if d.Type == "B" { - return &d - } - } - - // some computes don't have a boot disk, so... 
- return &DiskRecord{} -} - -// Parse the list of interfaces from compute/get response into a list of networks -// attached to this compute -func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} { - // return value will be used to d.Set("network") item of dataSourceCompute schema - length := len(ifaces) - log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length) - - result := []interface{}{} - - for _, value := range ifaces { - elem := make(map[string]interface{}) - // Keys in this map should correspond to the Schema definition - // as returned by networkSubresourceSchemaMake() - elem["net_id"] = value.NetID - elem["net_type"] = value.NetType - elem["ip_address"] = value.IPAddress - elem["mac"] = value.MAC - - // log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType) - - result = append(result, elem) - } - - return result -} - func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool { for _, ExtraDisk := range ExtraDisks { if DiskId == uint(ExtraDisk.(int)) { @@ -124,337 +51,796 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool { return false } -func flattenDataComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, disk := range disksList { - if findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks - continue - } - temp := map[string]interface{}{ - "disk_name": disk.Name, - "disk_id": disk.ID, - "disk_type": disk.Type, - "sep_id": disk.SepID, - "shareable": disk.Shareable, - "size_max": disk.SizeMax, - "size_used": disk.SizeUsed, - "pool": disk.Pool, - "desc": disk.Desc, - "image_id": disk.ImageID, - "size": disk.SizeMax, - } - res = append(res, temp) - } - return res -} - -func flattenDataCompute(d *schema.ResourceData, compFacts string) error { - // This function expects that compFacts string contains response from API compute/get, - // i.e. detailed information about compute instance. - // - // NOTE: this function modifies ResourceData argument - as such it should never be called - // from resourceComputeExists(...) 
method - model := ComputeGetResp{} - log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts) - err := json.Unmarshal([]byte(compFacts), &model) +func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + compute, err := utilityDataComputeCheckPresence(ctx, d, m) if err != nil { - return err - } - - log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID) - - d.SetId(fmt.Sprintf("%d", model.ID)) - // d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't - d.Set("name", model.Name) - d.Set("rg_id", model.RgID) - d.Set("rg_name", model.RgName) - d.Set("account_id", model.AccountID) - d.Set("account_name", model.AccountName) - d.Set("driver", model.Driver) - d.Set("cpu", model.Cpu) - d.Set("ram", model.Ram) - // d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way - if model.VirtualImageID != 0 { - d.Set("image_id", model.VirtualImageID) - } else { - d.Set("image_id", model.ImageID) - } - d.Set("description", model.Desc) - d.Set("enabled", false) - if model.Status == status.Enabled { - d.Set("enabled", true) - } - - //d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress fucntion - // d.Set("status", model.Status) - // d.Set("tech_status", model.TechStatus) - d.Set("started", false) - if model.TechStatus == "STARTED" { - d.Set("started", true) + return diag.FromErr(err) } + d.SetId(strconv.Itoa(int(compute.ID))) - bootDisk := findBootDisk(model.Disks) - - d.Set("boot_disk_size", bootDisk.SizeMax) - d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations - d.Set("sep_id", bootDisk.SepID) - d.Set("pool", bootDisk.Pool) - - //if len(model.Disks) > 0 { - //log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks)) - //if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil { - //return err - //} - //} + flattenDataCompute(d, compute) + return nil +} - if len(model.Interfaces) > 0 { - log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces)) - if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil { - return err - } +func computeListRulesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "key": { + Type: schema.TypeString, + Computed: true, + }, + "mode": { + Type: schema.TypeString, + Computed: true, + }, + "policy": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, } + return res +} - if len(model.OsUsers) > 0 { - log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers)) - if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil { - return err - } +func computeListACLSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, 
+ "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, } +} - err = d.Set("disks", flattenDataComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List())) - if err != nil { - return err +func computeACLSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "account_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListACLSchemaMake(), + }, + }, + "compute_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListACLSchemaMake(), + }, + }, + "rg_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListACLSchemaMake(), + }, + }, } - - return nil } -func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - compFacts, err := utilityComputeCheckPresence(ctx, d, m) - if compFacts == "" { - // if empty string is returned from utilityComputeCheckPresence then there is no - // such Compute and err tells so - just return it to the calling party - d.SetId("") // ensure ID is empty - return diag.FromErr(err) +func computeIOTuneSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "read_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "read_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, + "read_iops_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "read_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, + "size_iops_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "total_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "total_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, + "total_iops_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "total_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, + "write_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "write_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, + "write_iops_sec": { + Type: schema.TypeInt, + Computed: true, + }, + "write_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + }, } +} - if err = flattenDataCompute(d, compFacts); err != nil { - return diag.FromErr(err) +func computeSnapshotsSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "label": { + Type: schema.TypeString, + Computed: true, + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + }, } - - return nil } - -func DataSourceCompute() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceComputeRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, +func computeListDisksSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "_ckey": { + Type: schema.TypeString, + Computed: true, }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of this compute instance. 
NOTE: this parameter is case sensitive.", - }, - - // TODO: consider removing compute_id from the schema, as it not practical to call this data provider if - // corresponding compute ID is already known - "compute_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored.", - }, - - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the resource group where this compute instance is located.", + "acl": { + Type: schema.TypeString, + Computed: true, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "boot_partition": { + Type: schema.TypeInt, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "destruction_time": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_path": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "enabled": { - Type: schema.TypeBool, - Computed: true, - Description: "If true - enable the compute, else - disable", + }, + "iotune": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeIOTuneSchemaMake(), }, - - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", + }, + "iqn": { + Type: schema.TypeString, + Computed: true, + }, + "login": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "order": { + Type: schema.TypeInt, + Computed: true, + }, + "params": { + Type: schema.TypeString, + Computed: true, + }, + "parent_id": { + Type: schema.TypeInt, + Computed: true, + }, + "passwd": { + Type: schema.TypeString, + Computed: true, + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + }, + "pool": { + Type: schema.TypeString, + Computed: true, + }, + "present_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", + }, + "purge_time": { + Type: schema.TypeInt, + Computed: true, + }, + "reality_device_number": { + Type: schema.TypeInt, + Computed: true, + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + }, + "role": { + Type: schema.TypeString, + Computed: true, + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + }, + "shareable": { + Type: schema.TypeBool, + Computed: true, + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "size_used": { + Type: schema.TypeFloat, + Computed: true, + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeSnapshotsSchemaMake(), }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "vmid": { + Type: 
schema.TypeInt, + Computed: true, + }, + } +} - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs to.", - }, +func computeQOSSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "e_rate": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "in_brust": { + Type: schema.TypeInt, + Computed: true, + }, + "in_rate": { + Type: schema.TypeInt, + Computed: true, + }, + } +} - "driver": { - Type: schema.TypeString, - Computed: true, - Description: "Hardware architecture of this compute instance.", +func computeInterfacesSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "conn_id": { + Type: schema.TypeInt, + Computed: true, + }, + "conn_type": { + Type: schema.TypeString, + Computed: true, + }, + "def_gw": { + Type: schema.TypeString, + Computed: true, + }, + "flip_group_id": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "listen_ssh": { + Type: schema.TypeBool, + Computed: true, + }, + "mac": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "netmask": { + Type: schema.TypeInt, + Computed: true, + }, + "net_type": { + Type: schema.TypeString, + Computed: true, + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + }, + "qos": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeQOSSchemaMake(), }, - - "cpu": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of CPUs allocated for this compute instance.", + }, + "target": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "vnfs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "ram": { - Type: schema.TypeInt, - Computed: true, - Description: "Amount of RAM in MB allocated for this compute instance.", + }, + } +} +func computeOsUsersSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "login": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + }, + } +} +func computeSnapSetsSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "label": { + Type: schema.TypeString, + Computed: true, + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + }, + } +} +func dataSourceComputeSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the OS image this compute instance is based on.", + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeACLSchemaMake(), }, - - "image_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the OS image this compute instance is based on.", + }, + "account_id": 
{ + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_label": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListRulesSchemaMake(), }, - - "boot_disk_size": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk size in GB.", + }, + "affinity_weight": { + Type: schema.TypeInt, + Computed: true, + }, + "anti_affinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListRulesSchemaMake(), }, - - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", + }, + "arch": { + Type: schema.TypeString, + Computed: true, + }, + "boot_order": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - - "extra_disks": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "IDs of the extra disk(s) attached to this compute.", + }, + "bootdisk_size": { + Type: schema.TypeInt, + Computed: true, + }, + "clone_reference": { + Type: schema.TypeInt, + Computed: true, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "disks": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name for disk", - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk size in GiB", - }, - "disk_type": { - Type: schema.TypeString, - Computed: true, - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Storage endpoint provider ID; by default the same with boot disk", - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - }, - "pool": { - Type: schema.TypeString, - Computed: true, - Description: "Pool name; by default will be chosen automatically", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Description: "Optional description", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Specify image id for create disk from template", - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk ID", - }, - "permanently": { - Type: schema.TypeBool, - Computed: true, - Description: "Disk deletion status", - }, + }, + "computeci_id": { + Type: schema.TypeInt, + Computed: true, + }, + "cpus": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "custom_fields": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "val": { + Type: schema.TypeString, + Computed: true, }, }, }, - - "network": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxNetworksPerCompute, - Elem: &schema.Resource{ - Schema: networkSubresourceSchemaMake(), - }, - Description: "Network connection(s) for this compute.", + }, + "deleted_by": { + Type: 
schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "devices": { + Type: schema.TypeString, + Computed: true, + }, + "disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListDisksSchemaMake(), }, - - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: osUsersSubresourceSchemaMake(), - }, - Description: "Guest OS users provisioned on this compute instance.", + }, + "driver": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeInterfacesSchemaMake(), }, - - "description": { - Type: schema.TypeString, - Computed: true, - Description: "User-defined text description of this compute instance.", + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "manager_id": { + Type: schema.TypeInt, + Computed: true, + }, + "manager_type": { + Type: schema.TypeString, + Computed: true, + }, + "migrationjob": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + "natable_vins_ip": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_name": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_network": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_network_name": { + Type: schema.TypeString, + Computed: true, + }, + "os_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeOsUsersSchemaMake(), }, - - "cloud_init": { - Type: schema.TypeString, - Computed: true, - Description: "Placeholder for cloud_init parameters.", + }, + "pinned": { + Type: schema.TypeBool, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "snap_sets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeSnapSetsSchemaMake(), }, - - "started": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Is compute started.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "userdata": { + Type: schema.TypeString, + Computed: true, + }, + "vgpus": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, }, + "virtual_image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "virtual_image_name": { + Type: 
schema.TypeString, + Computed: true, + }, + } +} + +func DataSourceCompute() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeSchemaMake(), } } diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_audits.go b/internal/service/cloudapi/kvmvm/data_source_compute_audits.go new file mode 100644 index 0000000..2c1378b --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_audits.go @@ -0,0 +1,74 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeAudits, err := utilityComputeAuditsCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenComputeAudits(computeAudits)) + return nil +} + +func dataSourceComputeAuditsSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "call": { + Type: schema.TypeString, + Computed: true, + }, + "responsetime": { + Type: schema.TypeFloat, + Computed: true, + }, + "statuscode": { + Type: schema.TypeInt, + Computed: true, + }, + "timestamp": { + Type: schema.TypeFloat, + Computed: true, + }, + "user": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } +} + +func DataSourceComputeAudits() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeAuditsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeAuditsSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_get_audits.go b/internal/service/cloudapi/kvmvm/data_source_compute_get_audits.go new file mode 100644 index 0000000..ac8b691 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_get_audits.go @@ -0,0 +1,62 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeGetAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeAudits, err := utilityComputeGetAuditsCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenComputeGetAudits(computeAudits)) + return nil +} + +func dataSourceComputeGetAuditsSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "epoch": { + Type: schema.TypeFloat, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } +} + +func 
DataSourceComputeGetAudits() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeGetAuditsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeGetAuditsSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_get_console_url.go b/internal/service/cloudapi/kvmvm/data_source_compute_get_console_url.go new file mode 100644 index 0000000..e464ff3 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_get_console_url.go @@ -0,0 +1,52 @@ +package kvmvm + +import ( + "context" + "strings" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + result := strings.ReplaceAll(string(computeConsoleUrl), "\"", "") + result = strings.ReplaceAll(string(result), "\\", "") + d.Set("console_url", result) + return nil +} + +func dataSourceComputeGetConsoleUrlSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, + "console_url": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func DataSourceComputeGetConsoleUrl() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeGetConsoleUrlRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeGetConsoleUrlSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_get_log.go b/internal/service/cloudapi/kvmvm/data_source_compute_get_log.go new file mode 100644 index 0000000..b40bfb9 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_get_log.go @@ -0,0 +1,53 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeGetLogRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeGetLog, err := utilityComputeGetLogCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("log", computeGetLog) + return nil +} + +func dataSourceComputeGetLogSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, + "path": { + Type: schema.TypeString, + Required: true, + }, + "log": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func DataSourceComputeGetLog() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeGetLogRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeGetLogSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_list.go b/internal/service/cloudapi/kvmvm/data_source_compute_list.go new 
file mode 100644 index 0000000..369bc6a --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_list.go @@ -0,0 +1,335 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeList, err := utilityDataComputeListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenComputeList(computeList)) + return nil +} + +func computeDisksSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Computed: true, + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + }, + } +} +func itemComputeSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListACLSchemaMake(), + }, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_label": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListRulesSchemaMake(), + }, + }, + "affinity_weight": { + Type: schema.TypeInt, + Computed: true, + }, + "anti_affinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeListRulesSchemaMake(), + }, + }, + "arch": { + Type: schema.TypeString, + Computed: true, + }, + "boot_order": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "bootdisk_size": { + Type: schema.TypeInt, + Computed: true, + }, + "clone_reference": { + Type: schema.TypeInt, + Computed: true, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "computeci_id": { + Type: schema.TypeInt, + Computed: true, + }, + "cpus": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "custom_fields": { //NEED + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "devices": { //NEED + Type: schema.TypeString, + Computed: true, + }, + "disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeDisksSchemaMake(), + }, + }, + "driver": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeInterfacesSchemaMake(), + }, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "manager_id": { + Type: schema.TypeInt, + Computed: true, + }, + "manager_type": { + Type: schema.TypeString, + 
Computed: true, + }, + "migrationjob": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "pinned": { + Type: schema.TypeBool, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "snap_sets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeSnapSetsSchemaMake(), + }, + }, + "stateless_sep_id": { + Type: schema.TypeInt, + Computed: true, + }, + "stateless_sep_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "val": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "total_disk_size": { + Type: schema.TypeInt, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "vgpus": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "vins_connected": { + Type: schema.TypeInt, + Computed: true, + }, + "virtual_image_id": { + Type: schema.TypeInt, + Computed: true, + }, + } +} + +func dataSourceCompputeListSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "includedeleted": { + Type: schema.TypeBool, + Optional: true, + }, + "page": { + Type: schema.TypeInt, + Optional: true, + }, + "size": { + Type: schema.TypeInt, + Optional: true, + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: itemComputeSchemaMake(), + }, + }, + } + return res +} + +func DataSourceComputeList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceCompputeListSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_pfw_list.go b/internal/service/cloudapi/kvmvm/data_source_compute_pfw_list.go new file mode 100644 index 0000000..78664d4 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_pfw_list.go @@ -0,0 +1,81 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputePfwListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computePfwList, err := utilityComputePfwListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenPfwList(computePfwList)) + return nil +} + +func dataSourceComputePfwListSchemaMake() 
map[string]*schema.Schema { + return map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Required: true, + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pfw_id": { + Type: schema.TypeInt, + Computed: true, + }, + "local_ip": { + Type: schema.TypeString, + Computed: true, + }, + "local_port": { + Type: schema.TypeInt, + Computed: true, + }, + "protocol": { + Type: schema.TypeString, + Computed: true, + }, + "public_port_end": { + Type: schema.TypeInt, + Computed: true, + }, + "public_port_start": { + Type: schema.TypeInt, + Computed: true, + }, + "vm_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + } +} + +func DataSourceComputePfwList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputePfwListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputePfwListSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/data_source_compute_user_list.go b/internal/service/cloudapi/kvmvm/data_source_compute_user_list.go new file mode 100644 index 0000000..7575b45 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/data_source_compute_user_list.go @@ -0,0 +1,45 @@ +package kvmvm + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computeUserList, err := utilityComputeUserListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + flattenUserList(d, computeUserList) + return nil +} + +func dataSourceComputeUserListSchemaMake() map[string]*schema.Schema { + res := computeACLSchemaMake() + res["compute_id"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + return res +} + +func DataSourceComputeUserList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceComputeUserListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceComputeUserListSchemaMake(), + } +} diff --git a/internal/service/cloudapi/kvmvm/flattens.go b/internal/service/cloudapi/kvmvm/flattens.go index ed0b6fd..f97bfc5 100644 --- a/internal/service/cloudapi/kvmvm/flattens.go +++ b/internal/service/cloudapi/kvmvm/flattens.go @@ -2,17 +2,195 @@ package kvmvm import ( "encoding/json" - "fmt" + "sort" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/rudecs/terraform-provider-decort/internal/status" log "github.com/sirupsen/logrus" ) -func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} { +func flattenDisks(disks []InfoDisk) []map[string]interface{} { res := make([]map[string]interface{}, 0) + for _, disk := range disks { + temp := map[string]interface{}{ + "disk_id": disk.ID, + "pci_slot": disk.PCISlot, + } + res = append(res, temp) + } + return res +} +func flattenQOS(qos QOS) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "e_rate": qos.ERate, + "guid": qos.GUID, + "in_brust": qos.InBurst, + "in_rate": qos.InRate, + } + res = append(res, 
temp) + return res +} +func flattenInterfaces(interfaces ListInterfaces) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, interfaceItem := range interfaces { + temp := map[string]interface{}{ + "conn_id": interfaceItem.ConnID, + "conn_type": interfaceItem.ConnType, + "def_gw": interfaceItem.DefGW, + "flip_group_id": interfaceItem.FLIPGroupID, + "guid": interfaceItem.GUID, + "ip_address": interfaceItem.IPAddress, + "listen_ssh": interfaceItem.ListenSSH, + "mac": interfaceItem.MAC, + "name": interfaceItem.Name, + "net_id": interfaceItem.NetID, + "netmask": interfaceItem.NetMask, + "net_type": interfaceItem.NetType, + "pci_slot": interfaceItem.PCISlot, + "qos": flattenQOS(interfaceItem.QOS), + "target": interfaceItem.Target, + "type": interfaceItem.Type, + "vnfs": interfaceItem.VNFs, + } + res = append(res, temp) + } + return res +} +func flattenSnapSets(snapSets ListSnapSets) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, snapSet := range snapSets { + temp := map[string]interface{}{ + "disks": snapSet.Disks, + "guid": snapSet.GUID, + "label": snapSet.Label, + "timestamp": snapSet.Timestamp, + } + res = append(res, temp) + } + return res +} +func flattenTags(tags map[string]string) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for key, val := range tags { + temp := map[string]interface{}{ + "key": key, + "val": val, + } + res = append(res, temp) + } + return res +} + +func flattenListRules(listRules ListRules) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rule := range listRules { + temp := map[string]interface{}{ + "guid": rule.GUID, + "key": rule.Key, + "mode": rule.Mode, + "policy": rule.Policy, + "topology": rule.Topology, + "value": rule.Value, + } + res = append(res, temp) + } + return res +} +func flattenListACL(listAcl ListACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, acl := range listAcl { + var explicit interface{} + switch acl.Explicit.(type) { // Platform quirk: Explicit may arrive either as a bool or as a string, so normalize it here + case bool: + explicit = acl.Explicit.(bool) + case string: + explicit, _ = strconv.ParseBool(acl.Explicit.(string)) + } + temp := map[string]interface{}{ + "explicit": explicit, + "guid": acl.GUID, + "right": acl.Right, + "status": acl.Status, + "type": acl.Type, + "user_group_id": acl.UserGroupID, + } + res = append(res, temp) + } + return res +} +func flattenComputeList(computes ListComputes) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, compute := range computes { + customFields, _ := json.Marshal(compute.CustomFields) + devices, _ := json.Marshal(compute.Devices) + temp := map[string]interface{}{ + "acl": flattenListACL(compute.ACL), + "account_id": compute.AccountID, + "account_name": compute.AccountName, + "affinity_label": compute.AffinityLabel, + "affinity_rules": flattenListRules(compute.AffinityRules), + "affinity_weight": compute.AffinityWeight, + "anti_affinity_rules": flattenListRules(compute.AntiAffinityRules), + "arch": compute.Architecture, + "boot_order": compute.BootOrder, + "bootdisk_size": compute.BootDiskSize, + "clone_reference": compute.CloneReference, + "clones": compute.Clones, + "computeci_id": compute.ComputeCIID, + "cpus": compute.CPU, + "created_by": compute.CreatedBy, + "created_time": compute.CreatedTime, + "custom_fields": string(customFields), + "deleted_by": compute.DeletedBy, + "deleted_time": compute.DeletedTime, + "desc": compute.Description, + "devices": string(devices), + "disks":
flattenDisks(compute.Disks), + "driver": compute.Driver, + "gid": compute.GID, + "guid": compute.GUID, + "compute_id": compute.ID, + "image_id": compute.ImageID, + "interfaces": flattenInterfaces(compute.Interfaces), + "lock_status": compute.LockStatus, + "manager_id": compute.ManagerID, + "manager_type": compute.ManagerType, + "migrationjob": compute.MigrationJob, + "milestones": compute.Milestones, + "name": compute.Name, + "pinned": compute.Pinned, + "ram": compute.RAM, + "reference_id": compute.ReferenceID, + "registered": compute.Registered, + "res_name": compute.ResName, + "rg_id": compute.RGID, + "rg_name": compute.RGName, + "snap_sets": flattenSnapSets(compute.SnapSets), + "stateless_sep_id": compute.StatelessSepID, + "stateless_sep_type": compute.StatelessSepType, + "status": compute.Status, + "tags": flattenTags(compute.Tags), + "tech_status": compute.TechStatus, + "total_disk_size": compute.TotalDiskSize, + "updated_by": compute.UpdatedBy, + "updated_time": compute.UpdatedTime, + "user_managed": compute.UserManaged, + "vgpus": compute.VGPUs, + "vins_connected": compute.VINSConnected, + "virtual_image_id": compute.VirtualImageID, + } + res = append(res, temp) + } + + return res +} + +func flattenComputeDisksDemo(disksList ListComputeDisks, extraDisks []interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(disksList)) for _, disk := range disksList { - if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks + if disk.Name == "bootdisk" || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks continue } temp := map[string]interface{}{ @@ -24,66 +202,139 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) [ "size_max": disk.SizeMax, "size_used": disk.SizeUsed, "pool": disk.Pool, - "desc": disk.Desc, + "desc": disk.Description, "image_id": disk.ImageID, "size": disk.SizeMax, } res = append(res, temp) } + sort.Slice(res, func(i, j int) bool { + return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64) + }) return res } -func flattenCompute(d *schema.ResourceData, compFacts string) error { +func flattenNetwork(interfaces ListInterfaces) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(interfaces)) + //index := 0 + for _, network := range interfaces { + temp := map[string]interface{}{ + "net_id": network.NetID, + "net_type": network.NetType, + "ip_address": network.IPAddress, + "mac": network.MAC, + } + res = append(res, temp) + } + return res +} + +func findBootDisk(disks ListComputeDisks) *ItemComputeDisk { + for _, disk := range disks { + if disk.Name == "bootdisk" { + return &disk + } + } + return nil +} + +func flattenCompute(d *schema.ResourceData, compute RecordCompute) error { // This function expects that compFacts string contains response from API compute/get, // i.e. detailed information about compute instance. // // NOTE: this function modifies ResourceData argument - as such it should never be called // from resourceComputeExists(...) 
method - model := ComputeGetResp{} - log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts) - err := json.Unmarshal([]byte(compFacts), &model) + log.Debugf("flattenCompute: ID %d, RG ID %d", compute.ID, compute.RGID) + + devices, _ := json.Marshal(compute.Devices) + userdata, _ := json.Marshal(compute.Userdata) + + //check extraDisks, ipa_type, is, + d.SetId(strconv.FormatUint(compute.ID, 10)) + d.Set("acl", flattenACL(compute.ACL)) + d.Set("account_id", compute.AccountID) + d.Set("account_name", compute.AccountName) + d.Set("affinity_weight", compute.AffinityWeight) + d.Set("arch", compute.Architecture) + d.Set("boot_order", compute.BootOrder) + d.Set("boot_disk_size", compute.BootDiskSize) + bootDisk := findBootDisk(compute.Disks) + d.Set("boot_disk_id", bootDisk.ID) + d.Set("sep_id", bootDisk.SepID) + d.Set("pool", bootDisk.Pool) + d.Set("clone_reference", compute.CloneReference) + d.Set("clones", compute.Clones) + if string(userdata) != "{}" { + d.Set("cloud_init", string(userdata)) + } + d.Set("computeci_id", compute.ComputeCIID) + d.Set("created_by", compute.CreatedBy) + d.Set("created_time", compute.CreatedTime) + d.Set("custom_fields", flattenCustomFields(compute.CustomFields)) + d.Set("deleted_by", compute.DeletedBy) + d.Set("deleted_time", compute.DeletedTime) + d.Set("description", compute.Description) + d.Set("devices", string(devices)) + err := d.Set("disks", flattenComputeDisksDemo(compute.Disks, d.Get("extra_disks").(*schema.Set).List())) if err != nil { return err } - - log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID) - - d.SetId(fmt.Sprintf("%d", model.ID)) - // d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't - d.Set("name", model.Name) - d.Set("rg_id", model.RgID) - d.Set("rg_name", model.RgName) - d.Set("account_id", model.AccountID) - d.Set("account_name", model.AccountName) - d.Set("driver", model.Driver) - d.Set("cpu", model.Cpu) - d.Set("ram", model.Ram) - // d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way - if model.VirtualImageID != 0 { - d.Set("image_id", model.VirtualImageID) + d.Set("driver", compute.Driver) + d.Set("cpu", compute.CPU) + d.Set("gid", compute.GID) + d.Set("guid", compute.GUID) + d.Set("compute_id", compute.ID) + if compute.VirtualImageID != 0 { + d.Set("image_id", compute.VirtualImageID) } else { - d.Set("image_id", model.ImageID) + d.Set("image_id", compute.ImageID) + } + d.Set("interfaces", flattenInterfaces(compute.Interfaces)) + d.Set("lock_status", compute.LockStatus) + d.Set("manager_id", compute.ManagerID) + d.Set("manager_type", compute.ManagerType) + d.Set("migrationjob", compute.MigrationJob) + d.Set("milestones", compute.Milestones) + d.Set("name", compute.Name) + d.Set("natable_vins_id", compute.NatableVINSID) + d.Set("natable_vins_ip", compute.NatableVINSIP) + d.Set("natable_vins_name", compute.NatableVINSName) + d.Set("natable_vins_network", compute.NatableVINSNetwork) + d.Set("natable_vins_network_name", compute.NatableVINSNetworkName) + if err := d.Set("os_users", parseOsUsers(compute.OSUsers)); err != nil { + return err } - d.Set("description", model.Desc) + d.Set("pinned", compute.Pinned) + d.Set("ram", compute.RAM) + d.Set("reference_id", compute.ReferenceID) + d.Set("registered", compute.Registered) + d.Set("res_name", compute.ResName) + d.Set("rg_id", compute.RGID) + d.Set("rg_name", compute.RGName) + 
d.Set("snap_sets", flattenSnapSets(compute.SnapSets)) + d.Set("stateless_sep_id", compute.StatelessSepID) + d.Set("stateless_sep_type", compute.StatelessSepType) + d.Set("status", compute.Status) + d.Set("tags", flattenTags(compute.Tags)) + d.Set("tech_status", compute.TechStatus) + d.Set("updated_by", compute.UpdatedBy) + d.Set("updated_time", compute.UpdatedTime) + d.Set("user_managed", compute.UserManaged) + d.Set("vgpus", compute.VGPUs) + d.Set("virtual_image_id", compute.VirtualImageID) + d.Set("virtual_image_name", compute.VirtualImageName) + d.Set("enabled", false) - if model.Status == status.Enabled { + if compute.Status == status.Enabled { d.Set("enabled", true) } - //d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress fucntion - //d.Set("status", model.Status) - //d.Set("tech_status", model.TechStatus) d.Set("started", false) - if model.TechStatus == "STARTED" { + if compute.TechStatus == "STARTED" { d.Set("started", true) } - bootDisk := findBootDisk(model.Disks) - - d.Set("boot_disk_size", bootDisk.SizeMax) - d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations - d.Set("sep_id", bootDisk.SepID) - d.Set("pool", bootDisk.Pool) + d.Set("network", flattenNetwork(compute.Interfaces)) //if len(model.Disks) > 0 { //log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks)) @@ -92,24 +343,288 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error { //} //} - if len(model.Interfaces) > 0 { - log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces)) - if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil { - return err + return nil +} + +func flattenDataComputeDisksDemo(disksList ListComputeDisks, extraDisks []interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, disk := range disksList { + if findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks + continue + } + temp := map[string]interface{}{ + "disk_name": disk.Name, + "disk_id": disk.ID, + "disk_type": disk.Type, + "sep_id": disk.SepID, + "shareable": disk.Shareable, + "size_max": disk.SizeMax, + "size_used": disk.SizeUsed, + "pool": disk.Pool, + "desc": disk.Description, + "image_id": disk.ImageID, + "size": disk.SizeMax, + } + res = append(res, temp) + } + return res +} + +func flattenACL(acl RecordACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "account_acl": flattenListACL(acl.AccountACL), + "compute_acl": flattenListACL(acl.ComputeACL), + "rg_acl": flattenListACL(acl.RGACL), + } + res = append(res, temp) + return res +} + +func flattenAffinityRules(affinityRules ListRules) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, affinityRule := range affinityRules { + temp := map[string]interface{}{ + "guid": affinityRule.GUID, + "key": affinityRule.Key, + "mode": affinityRule.Mode, + "policy": affinityRule.Policy, + "topology": affinityRule.Topology, + "value": affinityRule.Value, } + res = append(res, temp) } - if len(model.OsUsers) > 0 { - log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers)) - if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil { - return err + return res +} + +func flattenIotune(iotune IOTune) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := 
map[string]interface{}{ + "read_bytes_sec": iotune.ReadBytesSec, + "read_bytes_sec_max": iotune.ReadBytesSecMax, + "read_iops_sec": iotune.ReadIOPSSec, + "read_iops_sec_max": iotune.ReadIOPSSecMax, + "size_iops_sec": iotune.SizeIOPSSec, + "total_bytes_sec": iotune.TotalBytesSec, + "total_bytes_sec_max": iotune.TotalBytesSecMax, + "total_iops_sec": iotune.TotalIOPSSec, + "total_iops_sec_max": iotune.TotalIOPSSecMax, + "write_bytes_sec": iotune.WriteBytesSec, + "write_bytes_sec_max": iotune.WriteBytesSecMax, + "write_iops_sec": iotune.WriteIOPSSec, + "write_iops_sec_max": iotune.WriteIOPSSecMax, + } + res = append(res, temp) + + return res +} + +func flattenSnapshots(snapshots SnapshotExtendList) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, snapshot := range snapshots { + temp := map[string]interface{}{ + "guid": snapshot.GUID, + "label": snapshot.Label, + "res_id": snapshot.ResID, + "snap_set_guid": snapshot.SnapSetGUID, + "snap_set_time": snapshot.SnapSetTime, + "timestamp": snapshot.TimeStamp, } + res = append(res, temp) } - err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List())) - if err != nil { - return err + return res +} + +func flattenListComputeDisks(disks ListComputeDisks) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, disk := range disks { + acl, _ := json.Marshal(disk.ACL) + temp := map[string]interface{}{ + "_ckey": disk.CKey, + "acl": string(acl), + "account_id": disk.AccountID, + "boot_partition": disk.BootPartition, + "created_time": disk.CreatedTime, + "deleted_time": disk.DeletedTime, + "description": disk.Description, + "destruction_time": disk.DestructionTime, + "disk_path": disk.DiskPath, + "gid": disk.GID, + "guid": disk.GUID, + "disk_id": disk.ID, + "image_id": disk.ImageID, + "images": disk.Images, + "iotune": flattenIotune(disk.IOTune), + "iqn": disk.IQN, + "login": disk.Login, + "milestones": disk.Milestones, + "name": disk.Name, + "order": disk.Order, + "params": disk.Params, + "parent_id": disk.ParentID, + "passwd": disk.Passwd, + "pci_slot": disk.PCISlot, + "pool": disk.Pool, + "present_to": disk.PresentTo, + "purge_time": disk.PurgeTime, + "reality_device_number": disk.RealityDeviceNumber, + "res_id": disk.ResID, + "role": disk.Role, + "sep_id": disk.SepID, + "shareable": disk.Shareable, + "size_max": disk.SizeMax, + "size_used": disk.SizeUsed, + "snapshots": flattenSnapshots(disk.Snapshots), + "status": disk.Status, + "tech_status": disk.TechStatus, + "type": disk.Type, + "vmid": disk.VMID, + } + res = append(res, temp) } - return nil + return res +} + +func flattenCustomFields(customFileds map[string]interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for key, val := range customFileds { + value, _ := json.Marshal(val) + temp := map[string]interface{}{ + "key": key, + "val": string(value), + } + res = append(res, temp) + } + return res +} +func flattenOsUsers(osUsers ListOSUser) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, user := range osUsers { + temp := map[string]interface{}{ + "guid": user.GUID, + "login": user.Login, + "password": user.Password, + "public_key": user.PubKey, + } + res = append(res, temp) + } + return res +} + +func flattenDataCompute(d *schema.ResourceData, compute RecordCompute) { + devices, _ := json.Marshal(compute.Devices) + userdata, _ := json.Marshal(compute.Userdata) + d.Set("acl", flattenACL(compute.ACL)) + d.Set("account_id", compute.AccountID) + 
d.Set("account_name", compute.AccountName) + d.Set("affinity_label", compute.AffinityLabel) + d.Set("affinity_rules", flattenAffinityRules(compute.AffinityRules)) + d.Set("affinity_weight", compute.AffinityWeight) + d.Set("anti_affinity_rules", flattenListRules(compute.AntiAffinityRules)) + d.Set("arch", compute.Architecture) + d.Set("boot_order", compute.BootOrder) + d.Set("bootdisk_size", compute.BootDiskSize) + d.Set("clone_reference", compute.CloneReference) + d.Set("clones", compute.Clones) + d.Set("computeci_id", compute.ComputeCIID) + d.Set("cpus", compute.CPU) + d.Set("created_by", compute.CreatedBy) + d.Set("created_time", compute.CreatedTime) + d.Set("custom_fields", flattenCustomFields(compute.CustomFields)) + d.Set("deleted_by", compute.DeletedBy) + d.Set("deleted_time", compute.DeletedTime) + d.Set("desc", compute.Description) + d.Set("devices", string(devices)) + d.Set("disks", flattenListComputeDisks(compute.Disks)) + d.Set("driver", compute.Driver) + d.Set("gid", compute.GID) + d.Set("guid", compute.GUID) + d.Set("compute_id", compute.ID) + d.Set("image_id", compute.ImageID) + d.Set("interfaces", flattenInterfaces(compute.Interfaces)) + d.Set("lock_status", compute.LockStatus) + d.Set("manager_id", compute.ManagerID) + d.Set("manager_type", compute.ManagerType) + d.Set("migrationjob", compute.MigrationJob) + d.Set("milestones", compute.Milestones) + d.Set("name", compute.Name) + d.Set("natable_vins_id", compute.NatableVINSID) + d.Set("natable_vins_ip", compute.NatableVINSIP) + d.Set("natable_vins_name", compute.NatableVINSName) + d.Set("natable_vins_network", compute.NatableVINSNetwork) + d.Set("natable_vins_network_name", compute.NatableVINSNetworkName) + d.Set("os_users", flattenOsUsers(compute.OSUsers)) + d.Set("pinned", compute.Pinned) + d.Set("ram", compute.RAM) + d.Set("reference_id", compute.ReferenceID) + d.Set("registered", compute.Registered) + d.Set("res_name", compute.ResName) + d.Set("rg_id", compute.RGID) + d.Set("rg_name", compute.RGName) + d.Set("snap_sets", flattenSnapSets(compute.SnapSets)) + d.Set("stateless_sep_id", compute.StatelessSepID) + d.Set("stateless_sep_type", compute.StatelessSepType) + d.Set("status", compute.Status) + d.Set("tags", compute.Tags) + d.Set("tech_status", compute.TechStatus) + d.Set("updated_by", compute.UpdatedBy) + d.Set("updated_time", compute.UpdatedTime) + d.Set("user_managed", compute.UserManaged) + d.Set("userdata", string(userdata)) + d.Set("vgpus", compute.VGPUs) + d.Set("virtual_image_id", compute.VirtualImageID) + d.Set("virtual_image_name", compute.VirtualImageName) +} + +func flattenComputeAudits(computeAudits ListAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, computeAudit := range computeAudits { + temp := map[string]interface{}{ + "call": computeAudit.Call, + "responsetime": computeAudit.ResponseTime, + "statuscode": computeAudit.StatusCode, + "timestamp": computeAudit.Timestamp, + "user": computeAudit.User, + } + res = append(res, temp) + } + return res +} + +func flattenPfwList(computePfws ListPFWs) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, computePfw := range computePfws { + temp := map[string]interface{}{ + "pfw_id": computePfw.ID, + "local_ip": computePfw.LocalIP, + "local_port": computePfw.LocalPort, + "protocol": computePfw.Protocol, + "public_port_end": computePfw.PublicPortEnd, + "public_port_start": computePfw.PublicPortStart, + "vm_id": computePfw.VMID, + } + res = append(res, temp) + } + return res +} + +func flattenUserList(d 
*schema.ResourceData, userList RecordACL) { + d.Set("account_acl", flattenListACL(userList.AccountACL)) + d.Set("compute_acl", flattenListACL(userList.ComputeACL)) + d.Set("rg_acl", flattenListACL(userList.RGACL)) +} + +func flattenComputeGetAudits(computeAudits ListShortAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, computeAudit := range computeAudits { + temp := map[string]interface{}{ + "epoch": computeAudit.Epoch, + "message": computeAudit.Message, + } + res = append(res, temp) + } + return res } diff --git a/internal/service/cloudapi/kvmvm/models.go b/internal/service/cloudapi/kvmvm/models.go index 68e85ab..0b59894 100644 --- a/internal/service/cloudapi/kvmvm/models.go +++ b/internal/service/cloudapi/kvmvm/models.go @@ -117,7 +117,7 @@ type SnapshotRecord struct { TimeStamp uint64 `json:"timestamp"` } -type SnapshotRecordList []SnapshotRecord +//type SnapshotRecordList []SnapshotRecord type ComputeGetResp struct { // ACLs `json:"ACL"` - it is a dictionary, special parsing required @@ -190,3 +190,888 @@ type ComputeBriefRecord struct { // this is a brief compute specifiaction as ret } type RgListComputesResp []ComputeBriefRecord + +//############# + +// Access Control List +type RecordACL struct { + // Account ACL list + AccountACL ListACL `json:"accountAcl"` + + // Compute ACL list + ComputeACL ListACL `json:"computeAcl"` + + // Resource group ACL list + RGACL ListACL `json:"rgAcl"` +} + +// ACL information +type ItemACL struct { + // Explicit + Explicit interface{} `json:"explicit"` + + // GUID + GUID string `json:"guid"` + + // Right + Right string `json:"right"` + + // Status + Status string `json:"status"` + + // Type + Type string `json:"type"` + + // User group ID + UserGroupID string `json:"userGroupId"` +} + +// List ACL +type ListACL []ItemACL + +// Main information about usage snapshot +type ItemUsageSnapshot struct { + // Count + Count uint64 `json:"count,omitempty"` + + // Stored + Stored float64 `json:"stored"` + + // Label + Label string `json:"label,omitempty"` + + // Timestamp + Timestamp uint64 `json:"timestamp,omitempty"` +} + +// List of usage snapshot +type ListUsageSnapshots []ItemUsageSnapshot + +// Main information about snapshot +type ItemSnapshot struct { + // List disk ID + Disks []uint64 `json:"disks"` + + // GUID + GUID string `json:"guid"` + + // Label + Label string `json:"label"` + + // Timestamp + Timestamp uint64 `json:"timestamp"` +} + +// List of snapshots +type ListSnapShots []ItemSnapshot + +// Main information about port forward +type ItemPFW struct { + // ID + ID uint64 `json:"id"` + + // Local IP + LocalIP string `json:"localIp"` + + // Local port + LocalPort uint64 `json:"localPort"` + + // Protocol + Protocol string `json:"protocol"` + + // Public port end + PublicPortEnd uint64 `json:"publicPortEnd"` + + // Public port start + PublicPortStart uint64 `json:"publicPortStart"` + + // Virtuel machine ID + VMID uint64 `json:"vmId"` +} + +// List port forwards +type ListPFWs []ItemPFW + +// Main information about affinity relations +type RecordAffinityRelations struct { + // Other node + OtherNode []interface{} `json:"otherNode"` + + // Other node indirect + OtherNodeIndirect []interface{} `json:"otherNodeIndirect"` + + // Other node indirect soft + OtherNodeIndirectSoft []interface{} `json:"otherNodeIndirectSoft"` + + // Other node soft + OtherNodeSoft []interface{} `json:"otherNodeSoft"` + + // Same node + SameNode []interface{} `json:"sameNode"` + + // Same node soft + SameNodeSoft []interface{} 
`json:"sameNodeSoft"` +} + +// Main information about attached network +type RecordNetAttach struct { + // Connection ID + ConnID uint64 `json:"connId"` + + // Connection type + ConnType string `json:"connType"` + + // Default GW + DefGW string `json:"defGw"` + + // FLIPGroup ID + FLIPGroupID uint64 `json:"flipgroupId"` + + // GUID + GUID string `json:"guid"` + + // IP address + IPAddress string `json:"ipAddress"` + + // Listen SSH + ListenSSH bool `json:"listenSsh"` + + // MAC + MAC string `json:"mac"` + + // Name + Name string `json:"name"` + + // Network ID + NetID uint64 `json:"netId"` + + // Network mask + NetMask uint64 `json:"netMask"` + + // Network type + NetType string `json:"netType"` + + // PCI slot + PCISlot uint64 `json:"pciSlot"` + + // QOS + QOS QOS `json:"qos"` + + // Target + Target string `json:"target"` + + // Type + Type string `json:"type"` + + // List VNF IDs + VNFs []uint64 `json:"vnfs"` +} + +// Detailed information about audit +type ItemAudit struct { + // Call + Call string `json:"call"` + + // Response time + ResponseTime float64 `json:"responsetime"` + + // Status code + StatusCode uint64 `json:"statuscode"` + + // Timestamp + Timestamp float64 `json:"timestamp"` + + // User + User string `json:"user"` +} + +// List Detailed audits +type ListAudits []ItemAudit + +// Short information about audit +type ItemShortAudit struct { + // Epoch + Epoch float64 `json:"epoch"` + + // Message + Message string `json:"message"` +} + +// List short audits +type ListShortAudits []ItemShortAudit + +// Main information about rule +type ItemRule struct { + // GUID + GUID string `json:"guid"` + + // Key + Key string `json:"key"` + + // Mode + Mode string `json:"mode"` + + // Policy + Policy string `json:"policy"` + + // Topology + Topology string `json:"topology"` + + // Value + Value string `json:"value"` +} + +// List rules +type ListRules []ItemRule + +// Detailed information about compute +type RecordCompute struct { + // Access Control List + ACL RecordACL `json:"ACL"` + + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Affinity label + AffinityLabel string `json:"affinityLabel"` + + // List affinity rules + AffinityRules ListRules `json:"affinityRules"` + + // Affinity weight + AffinityWeight uint64 `json:"affinityWeight"` + + // List anti affinity rules + AntiAffinityRules ListRules `json:"antiAffinityRules"` + + // Architecture + Architecture string `json:"arch"` + + // Boot order + BootOrder []string `json:"bootOrder"` + + // Boot disk size + BootDiskSize uint64 `json:"bootdiskSize"` + + // Clone reference + CloneReference uint64 `json:"cloneReference"` + + // List clone IDs + Clones []uint64 `json:"clones"` + + // Compute CI ID + ComputeCIID uint64 `json:"computeciId"` + + // Number of cores + CPU uint64 `json:"cpus"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Custom fields items + CustomFields map[string]interface{} `json:"customFields"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // Devices + Devices interface{} `json:"devices"` + + // List disks in compute + Disks ListComputeDisks `json:"disks"` + + // Driver + Driver string `json:"driver"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Image ID + ImageID uint64 
`json:"imageId"` + + // Image name + ImageName string `json:"imageName"` + + // List interfaces + Interfaces ListInterfaces `json:"interfaces"` + + // Lock status + LockStatus string `json:"lockStatus"` + + // Manager ID + ManagerID uint64 `json:"managerId"` + + // Manager type + ManagerType string `json:"managerType"` + + // Migration job + MigrationJob uint64 `json:"migrationjob"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // Natable VINS ID + NatableVINSID uint64 `json:"natableVinsId"` + + // Natable VINS IP + NatableVINSIP string `json:"natableVinsIp"` + + // Natable VINS Name + NatableVINSName string `json:"natableVinsName"` + + // Natable VINS network + NatableVINSNetwork string `json:"natableVinsNetwork"` + + // Natable VINS network name + NatableVINSNetworkName string `json:"natableVinsNetworkName"` + + // List OS Users + OSUsers ListOSUser `json:"osUsers"` + + // Pinned or not + Pinned bool `json:"pinned"` + + // Number of RAM + RAM uint64 `json:"ram"` + + // Reference ID + ReferenceID string `json:"referenceId"` + + // Registered or not + Registered bool `json:"registered"` + + // Resource name + ResName string `json:"resName"` + + // Resource group ID + RGID uint64 `json:"rgId"` + + // Resource group name + RGName string `json:"rgName"` + + // List snapsets + SnapSets ListSnapSets `json:"snapSets"` + + // Stateless SepID + StatelessSepID uint64 `json:"statelessSepId"` + + // Stateless SepType + StatelessSepType string `json:"statelessSepType"` + + // Status + Status string `json:"status"` + + // Tags + Tags map[string]string `json:"tags"` + + // Tech status + TechStatus string `json:"techStatus"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // User Managed or not + UserManaged bool `json:"userManaged"` + + // Userdata + Userdata interface{} `json:"userdata"` + + // vGPU IDs + VGPUs []uint64 `json:"vgpus"` + + // Virtual image ID + VirtualImageID uint64 `json:"virtualImageId"` + + // Virtual image name + VirtualImageName string `json:"virtualImageName"` +} + +// Main information about OS user +type ItemOSUser struct { + // GUID + GUID string `json:"guid"` + + // Login + Login string `json:"login"` + + // Password + Password string `json:"password"` + + // Public key + PubKey string `json:"pubkey"` +} + +// List OS users +type ListOSUser []ItemOSUser + +// Main information about snapsets +type ItemSnapSet struct { + // List disk IDs + Disks []uint64 `json:"disks"` + + // GUID + GUID string `json:"guid"` + + // Label + Label string `json:"label"` + + // Timestamp + Timestamp uint64 `json:"timestamp"` +} + +// List snapsets +type ListSnapSets []ItemSnapSet + +// Main information about VNF +type ItemVNFInterface struct { + // Connection ID + ConnID uint64 `json:"connId"` + + // Connection type + ConnType string `json:"connType"` + + // Default GW + DefGW string `json:"defGw"` + + // FLIPGroup ID + FLIPGroupID uint64 `json:"flipgroupId"` + + // GUID + GUID string `json:"guid"` + + // IP address + IPAddress string `json:"ipAddress"` + + // Listen SSH or not + ListenSSH bool `json:"listenSsh"` + + // MAC + MAC string `json:"mac"` + + // Name + Name string `json:"name"` + + // Network ID + NetID uint64 `json:"netId"` + + // Network mask + NetMask uint64 `json:"netMask"` + + // Network type + NetType string `json:"netType"` + + // PCI slot + PCISlot uint64 `json:"pciSlot"` + + // QOS + QOS QOS `json:"qos"` + + // Target + Target string `json:"target"` + + 
// Type + Type string `json:"type"` + + // List VNF IDs + VNFs []uint64 `json:"vnfs"` +} + +type QOS struct { + ERate uint64 `json:"eRate"` + GUID string `json:"guid"` + InBurst uint64 `json:"inBurst"` + InRate uint64 `json:"inRate"` +} + +// List VNF interfaces +type ListInterfaces []ItemVNFInterface + +// List compute disks +type ListComputeDisks []ItemComputeDisk + +// Main information about compute disk +type ItemComputeDisk struct { + // CKey + CKey string `json:"_ckey"` + + // Access Control List + ACL map[string]interface{} `json:"acl"` + + // Account ID + AccountID uint64 `json:"accountId"` + + // Boot partition + BootPartition uint64 `json:"bootPartition"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // Destruction time + DestructionTime uint64 `json:"destructionTime"` + + // Disk path + DiskPath string `json:"diskPath"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Image ID + ImageID uint64 `json:"imageId"` + + // List image IDs + Images []uint64 `json:"images"` + + // IO tune + IOTune IOTune `json:"iotune"` + + // IQN + IQN string `json:"iqn"` + + // Login + Login string `json:"login"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // Order + Order uint64 `json:"order"` + + // Params + Params string `json:"params"` + + // Parent ID + ParentID uint64 `json:"parentId"` + + // Password + Passwd string `json:"passwd"` + + // PCI slot + PCISlot uint64 `json:"pciSlot"` + + // Pool + Pool string `json:"pool"` + + // Present to + PresentTo []uint64 `json:"presentTo"` + + // Purge time + PurgeTime uint64 `json:"purgeTime"` + + // Reality device number + RealityDeviceNumber uint64 `json:"realityDeviceNumber"` + + // Resource ID + ResID string `json:"resId"` + + // Role + Role string `json:"role"` + + // SepID + SepID uint64 `json:"sepId"` + + // Shareable + Shareable bool `json:"shareable"` + + // Size max + SizeMax uint64 `json:"sizeMax"` + + //Size used + SizeUsed float64 `json:"sizeUsed"` + + // List extend snapshots + Snapshots SnapshotExtendList `json:"snapshots"` + + // Status + Status string `json:"status"` + + // Tech status + TechStatus string `json:"techStatus"` + + // Type + Type string `json:"type"` + + // Virtual machine ID + VMID uint64 `json:"vmid"` +} + +// Main information about snapshot extend +type SnapshotExtend struct { + // GUID + GUID string `json:"guid"` + + // Label + Label string `json:"label"` + + // Resource ID + ResID string `json:"resId"` + + // SnapSetGUID + SnapSetGUID string `json:"snapSetGuid"` + + // SnapSetTime + SnapSetTime uint64 `json:"snapSetTime"` + + // TimeStamp + TimeStamp uint64 `json:"timestamp"` +} + +// List Snapshot Extend +type SnapshotExtendList []SnapshotExtend + +// Main information about IO tune +type IOTune struct { + // ReadBytesSec + ReadBytesSec uint64 `json:"read_bytes_sec"` + + // ReadBytesSecMax + ReadBytesSecMax uint64 `json:"read_bytes_sec_max"` + + // ReadIOPSSec + ReadIOPSSec uint64 `json:"read_iops_sec"` + + // ReadIOPSSecMax + ReadIOPSSecMax uint64 `json:"read_iops_sec_max"` + + // SizeIOPSSec + SizeIOPSSec uint64 `json:"size_iops_sec"` + + // TotalBytesSec + TotalBytesSec uint64 `json:"total_bytes_sec"` + + // TotalBytesSecMax + TotalBytesSecMax uint64 `json:"total_bytes_sec_max"` + + // TotalIOPSSec + TotalIOPSSec uint64 `json:"total_iops_sec"` + + // 
TotalIOPSSecMax + TotalIOPSSecMax uint64 `json:"total_iops_sec_max"` + + // WriteBytesSec + WriteBytesSec uint64 `json:"write_bytes_sec"` + + // WriteBytesSecMax + WriteBytesSecMax uint64 `json:"write_bytes_sec_max"` + + // WriteIOPSSec + WriteIOPSSec uint64 `json:"write_iops_sec"` + + // WriteIOPSSecMax + WriteIOPSSecMax uint64 `json:"write_iops_sec_max"` +} + +// Main information about compute +type ItemCompute struct { + // Access Control List + ACL ListACL `json:"acl"` + + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Affinity label + AffinityLabel string `json:"affinityLabel"` + + // List affinity rules + AffinityRules ListRules `json:"affinityRules"` + + // Affinity weight + AffinityWeight uint64 `json:"affinityWeight"` + + // List anti affinity rules + AntiAffinityRules ListRules `json:"antiAffinityRules"` + + // Architecture + Architecture string `json:"arch"` + + // Boot order + BootOrder []string `json:"bootOrder"` + + // Boot disk size + BootDiskSize uint64 `json:"bootdiskSize"` + + // Clone reference + CloneReference uint64 `json:"cloneReference"` + + // List clone IDs + Clones []uint64 `json:"clones"` + + // Compute CI ID + ComputeCIID uint64 `json:"computeciId"` + + // Number of cores + CPU uint64 `json:"cpus"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Custom fields list + CustomFields map[string]interface{} `json:"customFields"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // Devices + Devices interface{} `json:"devices"` + + // List disk items + Disks []InfoDisk `json:"disks"` + + // Driver + Driver string `json:"driver"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Image ID + ImageID uint64 `json:"imageId"` + + // List interfaces + Interfaces ListInterfaces `json:"interfaces"` + + // Lock status + LockStatus string `json:"lockStatus"` + + // Manager ID + ManagerID uint64 `json:"managerId"` + + // Manager type + ManagerType string `json:"managerType"` + + // Migration job + MigrationJob uint64 `json:"migrationjob"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // Pinned or not + Pinned bool `json:"pinned"` + + // Number of RAM + RAM uint64 `json:"ram"` + + // Reference ID + ReferenceID string `json:"referenceId"` + + // Registered + Registered bool `json:"registered"` + + // Resource name + ResName string `json:"resName"` + + // Resource group ID + RGID uint64 `json:"rgId"` + + // Resource group name + RGName string `json:"rgName"` + + // List snapsets + SnapSets ListSnapSets `json:"snapSets"` + + // Stateless SepID + StatelessSepID uint64 `json:"statelessSepId"` + + // Stateless SepType + StatelessSepType string `json:"statelessSepType"` + + // Status + Status string `json:"status"` + + // Tags + Tags map[string]string `json:"tags"` + + // Tech status + TechStatus string `json:"techStatus"` + + // Total disk size + TotalDiskSize uint64 `json:"totalDisksSize"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // User Managed or not + UserManaged bool `json:"userManaged"` + + // List vGPU IDs + VGPUs []uint64 `json:"vgpus"` + + // VINS connected + VINSConnected uint64 `json:"vinsConnected"` 
+ + // Virtual image ID + VirtualImageID uint64 `json:"virtualImageId"` +} + +// Information Disk +type InfoDisk struct { + // ID + ID uint64 `json:"id"` + + // PCISlot + PCISlot uint64 `json:"pciSlot"` +} + +// List information about computes +type ListComputes []ItemCompute diff --git a/internal/service/cloudapi/kvmvm/osusers_subresource.go b/internal/service/cloudapi/kvmvm/osusers_subresource.go index cf64f7e..1c35fbd 100644 --- a/internal/service/cloudapi/kvmvm/osusers_subresource.go +++ b/internal/service/cloudapi/kvmvm/osusers_subresource.go @@ -38,13 +38,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func parseOsUsers(logins []OsUserRecord) []interface{} { +func parseOsUsers(logins ListOSUser) []interface{} { var result = make([]interface{}, len(logins)) for index, value := range logins { elem := make(map[string]interface{}) - elem["guid"] = value.Guid + elem["guid"] = value.GUID elem["login"] = value.Login elem["password"] = value.Password elem["public_key"] = value.PubKey @@ -70,9 +70,9 @@ func osUsersSubresourceSchemaMake() map[string]*schema.Schema { }, "password": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, + Type: schema.TypeString, + Computed: true, + //Sensitive: true, Description: "Password of this guest OS user.", }, diff --git a/internal/service/cloudapi/kvmvm/resource_check_input_values.go b/internal/service/cloudapi/kvmvm/resource_check_input_values.go new file mode 100644 index 0000000..fe2e313 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/resource_check_input_values.go @@ -0,0 +1,106 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" + log "github.com/sirupsen/logrus" +) + +func existRgID(ctx context.Context, d *schema.ResourceData, m interface{}) bool { + log.Debugf("resourceComputeCreate: check access for RG ID: %v", d.Get("rg_id").(int)) + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + rgList := []struct { + ID int `json:"id"` + }{} + + rgListAPI := "/restmachine/cloudapi/rg/list" + urlValues.Add("includedeleted", "false") + rgListRaw, err := c.DecortAPICall(ctx, "POST", rgListAPI, urlValues) + if err != nil { + return false + } + err = json.Unmarshal([]byte(rgListRaw), &rgList) + if err != nil { + return false + } + rgId := d.Get("rg_id").(int) + for _, rg := range rgList { + if rg.ID == rgId { + return true + } + } + return false +} + +func existImageId(ctx context.Context, d *schema.ResourceData, m interface{}) bool { + log.Debugf("resourceComputeCreate: check access for image ID: %v", d.Get("image_id").(int)) + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + imageList := []struct { + ID int `json:"id"` + }{} + imageListAPI := "/restmachine/cloudapi/image/list" + imageListRaw, err := c.DecortAPICall(ctx, "POST", imageListAPI, urlValues) + if err != nil { + return false + } + err = json.Unmarshal([]byte(imageListRaw), &imageList) + if err != nil { + return false + } + imageId := d.Get("image_id").(int) + for _, image := range imageList { + if image.ID == imageId { + return true + } + } + return false +} + +func existVinsIdInList(vinsId int, vinsList []struct { + ID int `json:"id"` +}) bool { + for _, vins := range vinsList { + if vinsId == vins.ID { + return true + } + } + return false +} + +func existVinsId(ctx context.Context, d *schema.ResourceData, m interface{}) (int, bool) { + log.Debugf("resourceComputeCreate: 
check access for vins IDs") + + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + vinsListAPI := "/restmachine/cloudapi/vins/list" + urlValues.Add("includeDeleted", "false") + vinsList := []struct { + ID int `json:"id"` + }{} + vinsListRaw, err := c.DecortAPICall(ctx, "POST", vinsListAPI, urlValues) + if err != nil { + return 0, false + } + err = json.Unmarshal([]byte(vinsListRaw), &vinsList) + if err != nil { + return 0, false + } + + networks := d.Get("network").(*schema.Set).List() + + for _, networkInterface := range networks { + + networkItem := networkInterface.(map[string]interface{}) + if !existVinsIdInList(networkItem["net_id"].(int), vinsList) { + return networkItem["net_id"].(int), false + } + } + return 0, true +} diff --git a/internal/service/cloudapi/kvmvm/resource_compute.go b/internal/service/cloudapi/kvmvm/resource_compute.go index 8a7f80c..3ff0341 100644 --- a/internal/service/cloudapi/kvmvm/resource_compute.go +++ b/internal/service/cloudapi/kvmvm/resource_compute.go @@ -34,13 +34,13 @@ package kvmvm import ( "context" - "encoding/json" "fmt" "net/url" "strconv" "github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/controller" + "github.com/rudecs/terraform-provider-decort/internal/dc" "github.com/rudecs/terraform-provider-decort/internal/statefuncs" "github.com/rudecs/terraform-provider-decort/internal/status" log "github.com/sirupsen/logrus" @@ -50,34 +50,35 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { - if oldVal == "" && newVal != "applied" { - // if old value for "cloud_init" resource is empty string, it means that we are creating new compute - // and there is a chance that the user will want custom cloud init parameters - so we check if - // cloud_init is explicitly set in TF file by making sure that its new value is different from "applied", - // which is a reserved key word. - log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) - return false // there is a difference between stored and new value - } - log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) - return true // suppress difference -} - func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { // we assume all mandatory parameters it takes to create a comptue instance are properly // specified - we rely on schema "Required" attributes to let Terraform validate them for us log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int)) + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + if !existRgID(ctx, d, m) { + return diag.Errorf("resourceComputeCreate: can't create Compute because rgID %d not allowed or does not exist", d.Get("rg_id").(int)) + } + + if !existImageId(ctx, d, m) { + return diag.Errorf("resourceComputeCreate: can't create Compute because imageID %d not allowed or does not exist", d.Get("image_id").(int)) + } + + if _, ok := d.GetOk("network"); ok { + if vinsId, ok := existVinsId(ctx, d, m); !ok { + return diag.Errorf("resourceComputeCreate: can't create Compute because vins ID %d not allowed or does not exist", vinsId) + } + } // create basic Compute (i.e. without extra disks and network connections - those will be attached // by subsequent individual API calls).
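// Overall creation sequence implemented below: the compute is first created in a stopped state;
// extra disks and network interfaces are then attached (a failure at either of these two steps
// rolls the new compute back); after that the compute is started and enabled, and the optional
// settings (affinity label and rules, anti-affinity rules, tags, port forwards, user access,
// snapshots, CD image, pin-to-stack, pause) are applied one by one, with errors from those
// steps collected into warnings instead of aborting the create.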
- c := m.(*controller.ControllerCfg) - urlValues := &url.Values{} - urlValues.Add("rgId", fmt.Sprintf("%d", d.Get("rg_id").(int))) + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) urlValues.Add("name", d.Get("name").(string)) - urlValues.Add("cpu", fmt.Sprintf("%d", d.Get("cpu").(int))) - urlValues.Add("ram", fmt.Sprintf("%d", d.Get("ram").(int))) - urlValues.Add("imageId", fmt.Sprintf("%d", d.Get("image_id").(int))) + urlValues.Add("cpu", strconv.Itoa(d.Get("cpu").(int))) + urlValues.Add("ram", strconv.Itoa(d.Get("ram").(int))) + urlValues.Add("imageId", strconv.Itoa(d.Get("image_id").(int))) urlValues.Add("netType", "NONE") urlValues.Add("start", "0") // at the 1st step create compute in a stopped state @@ -156,6 +157,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially compId, _ := strconv.Atoi(apiResp) + warnings := dc.Warnings{} + cleanup := false defer func() { if cleanup { @@ -174,10 +177,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string)) - // Configure data disks if any argVal, argSet = d.GetOk("extra_disks") if argSet && argVal.(*schema.Set).Len() > 0 { - // urlValues.Add("desc", argVal.(string)) log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len()) err = utilityComputeExtraDisksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute if err != nil { @@ -186,11 +187,10 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } } - // Configure external networks if any argVal, argSet = d.GetOk("network") if argSet && argVal.(*schema.Set).Len() > 0 { log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len()) - err = utilityComputeNetworksConfigure(ctx, d, m, false, true) // do_delta=false, as we are working on a new compute + err = utilityComputeNetworksConfigure(ctx, d, m, false, true) if err != nil { log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err) cleanup = true @@ -205,8 +205,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf reqValues.Add("computeId", fmt.Sprintf("%d", compId)) log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", compId) if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, reqValues); err != nil { - cleanup = true - return diag.FromErr(err) + warnings.Add(err) } } @@ -219,7 +218,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf urlValues.Add("computeId", fmt.Sprintf("%d", compId)) log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", compId, enabled) if _, err := c.DecortAPICall(ctx, "POST", api, urlValues); err != nil { - return diag.FromErr(err) + warnings.Add(err) } } @@ -232,7 +231,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf urlValues.Add("affinityLabel", affinityLabel) _, err := c.DecortAPICall(ctx, "POST", ComputeAffinityLabelSetAPI, urlValues) if err != nil { - return diag.FromErr(err) + warnings.Add(err) } urlValues = &url.Values{} } @@ -287,8 +286,7 @@ func 
resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf urlValues.Add("value", arConv["value"].(string)) _, err := c.DecortAPICall(ctx, "POST", ComputeAffinityRuleAddAPI, urlValues) if err != nil { - cleanup = true - return diag.FromErr(err) + warnings.Add(err) } urlValues = &url.Values{} } @@ -310,8 +308,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf urlValues.Add("value", arConv["value"].(string)) _, err := c.DecortAPICall(ctx, "POST", ComputeAntiAffinityRuleAddAPI, urlValues) if err != nil { - cleanup = true - return diag.FromErr(err) + warnings.Add(err) } urlValues = &url.Values{} } @@ -319,13 +316,124 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf } } + if tags, ok := d.GetOk("tags"); ok { + log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", compId) + addedTags := tags.(*schema.Set).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + urlValues = &url.Values{} + tagItem := tagInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("key", tagItem["key"].(string)) + urlValues.Add("value", tagItem["value"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeTagAddAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + } + } + + if pfws, ok := d.GetOk("port_forwarding"); ok { + log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", compId) + addedPfws := pfws.(*schema.Set).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + urlValues = &url.Values{} + pfwItem := pfwInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("publicPortStart", strconv.Itoa(pfwItem["public_port_start"].(int))) + urlValues.Add("publicPortEnd", strconv.Itoa(pfwItem["public_port_end"].(int))) + urlValues.Add("localBasePort", strconv.Itoa(pfwItem["local_port"].(int))) + urlValues.Add("proto", pfwItem["proto"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputePfwAddAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + } + } + if userAcess, ok := d.GetOk("user_access"); ok { + log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", compId) + usersAcess := userAcess.(*schema.Set).List() + if len(usersAcess) > 0 { + for _, userAcessInterface := range usersAcess { + urlValues = &url.Values{} + userAccessItem := userAcessInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("userName", userAccessItem["username"].(string)) + urlValues.Add("accesstype", userAccessItem["access_type"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeUserGrantAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + } + } + + if snapshotList, ok := d.GetOk("snapshot"); ok { + log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", compId) + snapshots := snapshotList.(*schema.Set).List() + if len(snapshots) > 0 { + for _, snapshotInterface := range snapshots { + urlValues = &url.Values{} + snapshotItem := snapshotInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("userName", snapshotItem["label"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeSnapshotCreateAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + } + } + + if cdtList, ok := d.GetOk("cd"); ok { + log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", compId) + cds := cdtList.(*schema.Set).List() + if len(cds) > 0 { + urlValues = &url.Values{} + 
snapshotItem := cds[0].(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("cdromId", strconv.Itoa(snapshotItem["cdrom_id"].(int))) + _, err := c.DecortAPICall(ctx, "POST", ComputeCdInsertAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + } + + if d.Get("pin_to_stack").(bool) == true { + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", ComputePinToStackAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + + if d.Get("pause").(bool) == true { + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", ComputePauseAPI, urlValues) + if err != nil { + warnings.Add(err) + } + } + log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string)) // We may reuse dataSourceComputeRead here as we maintain similarity // between Compute resource and Compute data source schemas // Compute read function will also update resource ID on success, so that Terraform // will know the resource exists - return resourceComputeRead(ctx, d, m) + defer resourceComputeRead(ctx, d, m) + return warnings.Get() } func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -334,19 +442,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac c := m.(*controller.ControllerCfg) - compFacts, err := utilityComputeCheckPresence(ctx, d, m) - if compFacts == "" { - if err != nil { - return diag.FromErr(err) - } - // Compute with such name and RG ID was not found - return nil - } - - compute := &ComputeGetResp{} - err = json.Unmarshal([]byte(compFacts), compute) - - log.Debugf("resourceComputeRead: compute is: %+v", compute) + compute, err := utilityComputeCheckPresence(ctx, d, m) if err != nil { return diag.FromErr(err) } @@ -376,17 +472,12 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac return diag.Errorf("The compute is in status: %s, please, contant the support for more information", compute.Status) } - compFacts, err = utilityComputeCheckPresence(ctx, d, m) - log.Debugf("resourceComputeRead: after changes compute is: %s", compFacts) - if compFacts == "" { - if err != nil { - return diag.FromErr(err) - } - // Compute with such name and RG ID was not found - return nil + compute, err = utilityComputeCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) } - if err = flattenCompute(d, compFacts); err != nil { + if err = flattenCompute(d, compute); err != nil { return diag.FromErr(err) } @@ -401,13 +492,23 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf d.Id(), d.Get("name").(string), d.Get("rg_id").(int)) c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} - computeRaw, err := utilityComputeCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) + if !existRgID(ctx, d, m) { + return diag.Errorf("resourceComputeUpdate: can't update Compute because rgID %d not allowed or does not exist", d.Get("rg_id").(int)) + } + + if !existImageId(ctx, d, m) { + return diag.Errorf("resourceComputeUpdate: can't update Compute because imageID %d not allowed or does not exist", d.Get("image_id").(int)) + } + + if _, ok := d.GetOk("network"); ok { + if vinsId, ok := existVinsId(ctx, d, m); !ok { + return diag.Errorf("resourceComputeUpdate: can't update Compute because vins ID %d not allowed or does not exist", vinsId) + } + } - compute := &ComputeGetResp{} - 
err = json.Unmarshal([]byte(computeRaw), compute) + + compute, err := utilityComputeCheckPresence(ctx, d, m) if err != nil { return diag.FromErr(err) } @@ -461,7 +562,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf */ // 1. Resize CPU/RAM - urlValues := &url.Values{} + urlValues = &url.Values{} doUpdate := false urlValues.Add("computeId", d.Id()) @@ -495,12 +596,20 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf // 2. Resize (grow) Boot disk oldSize, newSize := d.GetChange("boot_disk_size") if oldSize.(int) < newSize.(int) { - bdsParams := &url.Values{} - bdsParams.Add("diskId", fmt.Sprintf("%d", d.Get("boot_disk_id").(int))) - bdsParams.Add("size", fmt.Sprintf("%d", newSize.(int))) + urlValues := &url.Values{} + if diskId, ok := d.GetOk("boot_disk_id"); ok { + urlValues.Add("diskId", strconv.Itoa(diskId.(int))) + } else { + bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + urlValues.Add("diskId", strconv.FormatUint(bootDisk.ID, 10)) + } + urlValues.Add("size", strconv.Itoa(newSize.(int))) log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d", d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int)) - _, err := c.DecortAPICall(ctx, "POST", DisksResizeAPI, bdsParams) + _, err := c.DecortAPICall(ctx, "POST", DisksResizeAPI, urlValues) if err != nil { return diag.FromErr(err) } @@ -532,20 +641,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } } - if d.HasChange("started") { - params := &url.Values{} - params.Add("computeId", d.Id()) - if d.Get("started").(bool) { - if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, params); err != nil { - return diag.FromErr(err) - } - } else { - if _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, params); err != nil { - return diag.FromErr(err) - } - } - } - urlValues = &url.Values{} if d.HasChange("disks") { deletedDisks := make([]interface{}, 0) @@ -634,6 +729,20 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } } + if d.HasChange("started") { + params := &url.Values{} + params.Add("computeId", d.Id()) + if d.Get("started").(bool) { + if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, params); err != nil { + return diag.FromErr(err) + } + } else { + if _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, params); err != nil { + return diag.FromErr(err) + } + } + } + if d.HasChange("affinity_label") { affinityLabel := d.Get("affinity_label").(string) urlValues.Add("computeId", d.Id()) @@ -783,107 +892,535 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } - // we may reuse dataSourceComputeRead here as we maintain similarity - // between Compute resource and Compute data source schemas - return resourceComputeRead(ctx, d, m) -} + if d.HasChange("tags") { + oldSet, newSet := d.GetChange("tags") + deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedTags) > 0 { + for _, tagInterface := range deletedTags { + urlValues := &url.Values{} + tagItem := tagInterface.(map[string]interface{}) -func isContainsDisk(els []interface{}, el interface{}) bool { - for _, elOld := range els { - elOldConv := elOld.(map[string]interface{}) - elConv := el.(map[string]interface{}) - if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) { - return true + urlValues.Add("computeId", d.Id()) + urlValues.Add("key", 
tagItem["key"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeTagRemoveAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } } - } - return false -} -func isContainsAR(els []interface{}, el interface{}) bool { - for _, elOld := range els { - elOldConv := elOld.(map[string]interface{}) - elConv := el.(map[string]interface{}) - if elOldConv["key"].(string) == elConv["key"].(string) && - elOldConv["value"].(string) == elConv["value"].(string) && - elOldConv["mode"].(string) == elConv["mode"].(string) && - elOldConv["topology"].(string) == elConv["topology"].(string) && - elOldConv["policy"].(string) == elConv["policy"].(string) { - return true + addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + urlValues := &url.Values{} + tagItem := tagInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("key", tagItem["key"].(string)) + urlValues.Add("value", tagItem["value"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeTagAddAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } } } - return false -} -func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - // NOTE: this function destroys target Compute instance "permanently", so - // there is no way to restore it. - // If compute being destroyed has some extra disks attached, they are - // detached from the compute - log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d", - d.Get("name").(string), d.Get("rg_id").(int)) + if d.HasChange("port_forwarding") { + oldSet, newSet := d.GetChange("port_forwarding") + deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedPfws) > 0 { + for _, pfwInterface := range deletedPfws { + urlValues := &url.Values{} + pfwItem := pfwInterface.(map[string]interface{}) - c := m.(*controller.ControllerCfg) + urlValues.Add("computeId", d.Id()) + urlValues.Add("publicPortStart", strconv.Itoa(pfwItem["public_port_start"].(int))) + if pfwItem["public_port_end"].(int) == -1 { + urlValues.Add("publicPortEnd", strconv.Itoa(pfwItem["public_port_start"].(int))) + } else { + urlValues.Add("publicPortEnd", strconv.Itoa(pfwItem["public_port_end"].(int))) + } + urlValues.Add("localBasePort", strconv.Itoa(pfwItem["local_port"].(int))) + urlValues.Add("proto", pfwItem["proto"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputePfwDelAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } - params := &url.Values{} - params.Add("computeId", d.Id()) - params.Add("permanently", strconv.FormatBool(d.Get("permanently").(bool))) - params.Add("detachDisks", strconv.FormatBool(d.Get("detach_disks").(bool))) + addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + urlValues := &url.Values{} + pfwItem := pfwInterface.(map[string]interface{}) - if _, err := c.DecortAPICall(ctx, "POST", ComputeDeleteAPI, params); err != nil { - return diag.FromErr(err) + urlValues.Add("computeId", d.Id()) + urlValues.Add("publicPortStart", strconv.Itoa(pfwItem["public_port_start"].(int))) + urlValues.Add("publicPortEnd", strconv.Itoa(pfwItem["public_port_end"].(int))) + urlValues.Add("localBasePort", strconv.Itoa(pfwItem["local_port"].(int))) + urlValues.Add("proto", pfwItem["proto"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputePfwAddAPI, urlValues) + if 
err != nil { + return diag.FromErr(err) + } + } + } } - return nil -} - -func ResourceComputeSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.", - }, + if d.HasChange("user_access") { + oldSet, newSet := d.GetChange("user_access") + deletedUserAccess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedUserAccess) > 0 { + for _, userAccessInterface := range deletedUserAccess { + urlValues := &url.Values{} + userAccessItem := userAccessInterface.(map[string]interface{}) - "rg_id": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "ID of the resource group where this compute should be deployed.", - }, + urlValues.Add("computeId", d.Id()) + urlValues.Add("userName", userAccessItem["username"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeUserRevokeAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } - "driver": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating - Description: "Hardware architecture of this compute instance.", - }, + addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedUserAccess) > 0 { + for _, userAccessInterface := range addedUserAccess { + urlValues := &url.Values{} + userAccessItem := userAccessInterface.(map[string]interface{}) - "cpu": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), - Description: "Number of CPUs to allocate to this compute instance.", - }, + urlValues.Add("computeId", d.Id()) + urlValues.Add("userName", userAccessItem["username"].(string)) + urlValues.Add("accesstype", userAccessItem["access_type"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeUserGrantAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + } - "ram": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), - Description: "Amount of RAM in MB to allocate to this compute instance.", - }, + if d.HasChange("snapshot") { + oldSet, newSet := d.GetChange("snapshot") + deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedSnapshots) > 0 { + for _, snapshotInterface := range deletedSnapshots { + urlValues := &url.Values{} + snapshotItem := snapshotInterface.(map[string]interface{}) - "image_id": { - Type: schema.TypeInt, - Required: true, + urlValues.Add("computeId", d.Id()) + urlValues.Add("label", snapshotItem["label"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeSnapshotDeleteAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedSnapshots) > 0 { + for _, snapshotInterface := range addedSnapshots { + urlValues := &url.Values{} + snapshotItem := snapshotInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("label", snapshotItem["label"].(string)) + _, err := c.DecortAPICall(ctx, "POST", ComputeSnapshotCreateAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + } + + if d.HasChange("rollback") { + if 
rollback, ok := d.GetOk("rollback"); ok { + urlValues := &url.Values{} + + //Compute must be stopped before rollback + urlValues.Add("computeId", d.Id()) + urlValues.Add("force", "false") + _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + + urlValues = &url.Values{} + rollbackInterface := rollback.(*schema.Set).List()[0] + rollbackItem := rollbackInterface.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("label", rollbackItem["label"].(string)) + _, err = c.DecortAPICall(ctx, "POST", ComputeSnapshotRollbackAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("cd") { + oldSet, newSet := d.GetChange("cd") + deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedCd) > 0 { + urlValues := &url.Values{} + + urlValues.Add("computeId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", ComputeCdEjectAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + + addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedCd) > 0 { + urlValues := &url.Values{} + cdItem := addedCd[0].(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("cdromId", strconv.Itoa(cdItem["cdrom_id"].(int))) + _, err := c.DecortAPICall(ctx, "POST", ComputeCdInsertAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("pin_to_stack") { + oldPin, newPin := d.GetChange("pin_to_stack") + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + if oldPin.(bool) == true && newPin.(bool) == false { + _, err := c.DecortAPICall(ctx, "POST", ComputeUnpinFromStackAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + if oldPin.(bool) == false && newPin.(bool) == true { + _, err := c.DecortAPICall(ctx, "POST", ComputePinToStackAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("pause") { + oldPause, newPause := d.GetChange("pause") + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + if oldPause.(bool) == true && newPause.(bool) == false { + _, err := c.DecortAPICall(ctx, "POST", ComputeResumeAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + if oldPause.(bool) == false && newPause.(bool) == true { + _, err := c.DecortAPICall(ctx, "POST", ComputePauseAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChange("reset") { + oldReset, newReset := d.GetChange("reset") + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + if oldReset.(bool) == false && newReset.(bool) == true { + _, err := c.DecortAPICall(ctx, "POST", ComputeResetAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + //redeploy + if d.HasChange("image_id") { + oldImage, newImage := d.GetChange("image_id") + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + urlValues.Add("force", "false") + _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + + if oldImage.(int) != newImage.(int) { + urlValues := &url.Values{} + + urlValues.Add("computeId", d.Id()) + urlValues.Add("imageId", strconv.Itoa(newImage.(int))) + if diskSize, ok := d.GetOk("boot_disk_size"); ok { + urlValues.Add("diskSize", strconv.Itoa(diskSize.(int))) + } + if dataDisks, ok := d.GetOk("data_disks"); ok { + urlValues.Add("dataDisks", dataDisks.(string)) + } + if autoStart, ok := 
d.GetOk("auto_start"); ok { + urlValues.Add("autoStart", strconv.FormatBool(autoStart.(bool))) + } + if forceStop, ok := d.GetOk("force_stop"); ok { + urlValues.Add("forceStop", strconv.FormatBool(forceStop.(bool))) + } + _, err := c.DecortAPICall(ctx, "POST", ComputeRedeployAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + + // we may reuse dataSourceComputeRead here as we maintain similarity + // between Compute resource and Compute data source schemas + return resourceComputeRead(ctx, d, m) +} + +func isContainsDisk(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) { + return true + } + } + return false +} + +func isContainsAR(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["key"].(string) == elConv["key"].(string) && + elOldConv["value"].(string) == elConv["value"].(string) && + elOldConv["mode"].(string) == elConv["mode"].(string) && + elOldConv["topology"].(string) == elConv["topology"].(string) && + elOldConv["policy"].(string) == elConv["policy"].(string) { + return true + } + } + return false +} + +func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + // NOTE: this function destroys target Compute instance "permanently", so + // there is no way to restore it. + // If compute being destroyed has some extra disks attached, they are + // detached from the compute + log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d", + d.Get("name").(string), d.Get("rg_id").(int)) + + c := m.(*controller.ControllerCfg) + + params := &url.Values{} + params.Add("computeId", d.Id()) + params.Add("permanently", strconv.FormatBool(d.Get("permanently").(bool))) + params.Add("detachDisks", strconv.FormatBool(d.Get("detach_disks").(bool))) + + if _, err := c.DecortAPICall(ctx, "POST", ComputeDeleteAPI, params); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func disksSubresourceSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_name": { + Type: schema.TypeString, + Required: true, + Description: "Name for disk", + }, + "size": { + Type: schema.TypeInt, + Required: true, + Description: "Disk size in GiB", + }, + "disk_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Storage endpoint provider ID; by default the same with boot disk", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Pool name; by default will be chosen automatically", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional description", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Specify image id for create disk from template", + }, + "permanently": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Disk deletion status", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID", + }, + "shareable": { + Type: schema.TypeBool, + 
Computed: true, + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "size_used": { + Type: schema.TypeInt, + Computed: true, + }, + } + return rets +} + +func tagsSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + } +} + +func portForwardingSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "public_port_start": { + Type: schema.TypeInt, + Required: true, + }, + "public_port_end": { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + "local_port": { + Type: schema.TypeInt, + Required: true, + }, + "proto": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false), + }, + } +} + +func userAccessSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + }, + "access_type": { + Type: schema.TypeString, + Required: true, + }, + } +} + +func snapshotSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Required: true, + }, + } +} + +func snapshotRollbackSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Required: true, + }, + } +} + +func cdSubresourceSchemaMake() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "cdrom_id": { + Type: schema.TypeInt, + Required: true, + }, + } +} + +func ResourceComputeSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, ForceNew: true, + Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.", + }, + + "rg_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + Description: "ID of the resource group where this compute should be deployed.", + }, + + "driver": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: statefuncs.StateFuncToUpper, + ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating + Description: "Hardware architecture of this compute instance.", + }, + + "cpu": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), + Description: "Number of CPUs to allocate to this compute instance.", + }, + + "ram": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), + Description: "Amount of RAM in MB to allocate to this compute instance.", + }, + + "image_id": { + Type: schema.TypeInt, + Required: true, + //ForceNew: true, //REDEPLOY Description: "ID of the OS image to base this compute instance on.", }, "boot_disk_size": { Type: schema.TypeInt, Optional: true, + Computed: true, Description: "This compute instance boot disk size in GB. 
Make sure it is large enough to accomodate selected OS image.", }, @@ -969,77 +1506,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { "disks": { Type: schema.TypeList, - Computed: true, Optional: true, Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_name": { - Type: schema.TypeString, - Required: true, - Description: "Name for disk", - }, - "size": { - Type: schema.TypeInt, - Required: true, - Description: "Disk size in GiB", - }, - "disk_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Storage endpoint provider ID; by default the same with boot disk", - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - }, - "pool": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Pool name; by default will be chosen automatically", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Optional description", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Specify image id for create disk from template", - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk ID", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Disk deletion status", - }, - }, + Schema: disksSubresourceSchemaMake(), }, }, + "sep_id": { Type: schema.TypeInt, Optional: true, @@ -1089,6 +1561,62 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { }, */ + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: tagsSubresourceSchemaMake(), + }, + }, + + "port_forwarding": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: portForwardingSubresourceSchemaMake(), + }, + }, + + "user_access": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: userAccessSubresourceSchemaMake(), + }, + }, + + "snapshot": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: snapshotSubresourceSchemaMake(), + }, + }, + + "rollback": { + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: snapshotRollbackSubresourceSchemaMake(), + }, + }, + + "cd": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: cdSubresourceSchemaMake(), + }, + }, + + "pin_to_stack": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "description": { Type: schema.TypeString, Optional: true, @@ -1099,8 +1627,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Optional: true, Description: "Optional cloud_init parameters. 
Applied when creating new compute instance only, ignored in all other cases.", - //Default: "applied", - //DiffSuppressFunc: cloudInitDiffSupperss, }, "enabled": { @@ -1110,40 +1636,37 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { Description: "If true - enable compute, else - disable", }, - // The rest are Compute properties, which are "computed" once it is created - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", + "pause": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", + "reset": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs to.", + "auto_start": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag for redeploy compute", }, - - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", + "force_stop": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag for redeploy compute", }, - - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: osUsersSubresourceSchemaMake(), - }, - Description: "Guest OS users provisioned on this compute instance.", + "data_disks": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"KEEP", "DETACH", "DESTROY"}, false), + Default: "DETACH", + Description: "Flag for redeploy compute", }, - "started": { Type: schema.TypeBool, Optional: true, @@ -1170,6 +1693,227 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { Optional: true, Description: "compute purpose", }, + + // The rest are Compute properties, which are "computed" once it is created + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the account this compute instance belongs to.", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the account this compute instance belongs to.", + }, + "affinity_weight": { + Type: schema.TypeInt, + Computed: true, + }, + "arch": { + Type: schema.TypeString, + Computed: true, + }, + "boot_order": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "boot_disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "This compute instance boot disk ID.", + }, + "clone_reference": { + Type: schema.TypeInt, + Computed: true, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "computeci_id": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "custom_fields": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "val": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "devices": { + Type: schema.TypeString, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, 
+ Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "interfaces": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeInterfacesSchemaMake(), + }, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "manager_id": { + Type: schema.TypeInt, + Computed: true, + }, + "manager_type": { + Type: schema.TypeString, + Computed: true, + }, + "migrationjob": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "natable_vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + "natable_vins_ip": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_name": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_network": { + Type: schema.TypeString, + Computed: true, + }, + "natable_vins_network_name": { + Type: schema.TypeString, + Computed: true, + }, + "os_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: osUsersSubresourceSchemaMake(), + }, + Description: "Guest OS users provisioned on this compute instance.", + }, + "pinned": { + Type: schema.TypeBool, + Computed: true, + }, + "reference_id": { + Type: schema.TypeString, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource group where this compute instance is located.", + }, + "snap_sets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: computeSnapSetsSchemaMake(), + }, + }, + "stateless_sep_id": { + Type: schema.TypeInt, + Computed: true, + }, + "stateless_sep_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "vgpus": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "virtual_image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "virtual_image_name": { + Type: schema.TypeString, + Computed: true, + }, } return rets } diff --git a/internal/service/cloudapi/kvmvm/utility_compute.go b/internal/service/cloudapi/kvmvm/utility_compute.go index 967f2ad..9d7837e 100644 --- a/internal/service/cloudapi/kvmvm/utility_compute.go +++ b/internal/service/cloudapi/kvmvm/utility_compute.go @@ -37,7 +37,6 @@ import ( "encoding/json" "fmt" "net/url" - "strconv" "github.com/rudecs/terraform-provider-decort/internal/controller" log "github.com/sirupsen/logrus" @@ -242,90 +241,20 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData return nil } -func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { - // This function tries to locate Compute by one of the following approaches: - // - if compute_id is specified - locate by compute ID - // - if compute_name is specified - locate by a combination of compute name and resource - // group ID - // - // If succeeded, it returns non-empty string that contains JSON formatted facts about the - // Compute as returned by compute/get 
API call. - // Otherwise it returns empty string and meaningful error. - // - // This function does not modify its ResourceData argument, so it is safe to use it as core - // method for resource's Exists method. - // - +func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordCompute, error) { c := m.(*controller.ControllerCfg) urlValues := &url.Values{} + compute := &RecordCompute{} - // make it possible to use "read" & "check presence" functions with compute ID set so - // that Import of Compute resource is possible - idSet := false - theId, err := strconv.Atoi(d.Id()) - if err != nil || theId <= 0 { - computeId, argSet := d.GetOk("compute_id") // NB: compute_id is NOT present in computeResource schema! - if argSet { - theId = computeId.(int) - idSet = true - } - } else { - idSet = true - } - - if idSet { - // compute ID is specified, try to get compute instance straight by this ID - log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", theId) - urlValues.Add("computeId", fmt.Sprintf("%d", theId)) - computeFacts, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues) - if err != nil { - return "", err - } - return computeFacts, nil - } - - // ID was not set in the schema upon entering this function - work through Compute name - // and RG ID - computeName, argSet := d.GetOk("name") - if !argSet { - return "", fmt.Errorf("cannot locate compute instance if name is empty and no compute ID specified") - } - - rgId, argSet := d.GetOk("rg_id") - if !argSet { - return "", fmt.Errorf("cannot locate compute by name %s if no resource group ID is set", computeName.(string)) - } - - urlValues.Add("rgId", fmt.Sprintf("%d", rgId)) - apiResp, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues) + urlValues.Add("computeId", d.Id()) + computeRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues) if err != nil { - return "", err + return *compute, err } - log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp) - - computeList := RgListComputesResp{} - err = json.Unmarshal([]byte(apiResp), &computeList) + err = json.Unmarshal([]byte(computeRaw), &compute) if err != nil { - return "", err + return *compute, err } - - // log.Printf("%#v", computeList) - log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(computeList)) - for index, item := range computeList { - // need to match Compute by name, skip Computes with the same name in DESTROYED satus - if item.Name == computeName.(string) && item.Status != "DESTROYED" { - log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name) - // we found the Compute we need - now get detailed information via compute/get API - cgetValues := &url.Values{} - cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID)) - apiResp, err = c.DecortAPICall(ctx, "POST", ComputeGetAPI, cgetValues) - if err != nil { - return "", err - } - return apiResp, nil - } - } - - return "", nil // there should be no error if Compute does not exist + return *compute, nil } diff --git a/internal/service/cloudapi/kvmvm/utility_compute_audits.go b/internal/service/cloudapi/kvmvm/utility_compute_audits.go new file mode 100644 index 0000000..5d38e23 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_audits.go @@ -0,0 +1,29 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputeAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAudits, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + computeAudits := &ListAudits{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computeAuditsRaw, err := c.DecortAPICall(ctx, "POST", ComputeAuditsAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(computeAuditsRaw), &computeAudits) + if err != nil { + return nil, err + } + return *computeAudits, nil +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_boot_disk.go b/internal/service/cloudapi/kvmvm/utility_compute_boot_disk.go new file mode 100644 index 0000000..d43110e --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_boot_disk.go @@ -0,0 +1,23 @@ +package kvmvm + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityComputeBootDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ItemComputeDisk, error) { + compute, err := utilityComputeCheckPresence(ctx, d, m) + if err != nil { + return nil, err + } + + bootDisk := &ItemComputeDisk{} + for _, disk := range compute.Disks { + if disk.Name == "bootdisk" { + *bootDisk = disk + break + } + } + return bootDisk, nil +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_get_audits.go b/internal/service/cloudapi/kvmvm/utility_compute_get_audits.go new file mode 100644 index 0000000..5f2a109 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_get_audits.go @@ -0,0 +1,29 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputeGetAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListShortAudits, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + computeAudits := &ListShortAudits{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computeAuditsRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAuditsAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(computeAuditsRaw), &computeAudits) + if err != nil { + return nil, err + } + return *computeAudits, nil +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_get_console_url.go b/internal/service/cloudapi/kvmvm/utility_compute_get_console_url.go new file mode 100644 index 0000000..7691b88 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_get_console_url.go @@ -0,0 +1,23 @@ +package kvmvm + +import ( + "context" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputeGetConsoleUrlCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computeConsoleUrlRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetConsoleUrlAPI, urlValues) + if err != nil { + return "", err + } + + return string(computeConsoleUrlRaw), nil +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_get_log.go 
b/internal/service/cloudapi/kvmvm/utility_compute_get_log.go new file mode 100644 index 0000000..c282b3b --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_get_log.go @@ -0,0 +1,24 @@ +package kvmvm + +import ( + "context" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputeGetLogCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + urlValues.Add("path", d.Get("path").(string)) + computeGetLogRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetLogAPI, urlValues) + if err != nil { + return "", err + } + + return string(computeGetLogRaw), nil +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_list.go b/internal/service/cloudapi/kvmvm/utility_compute_list.go new file mode 100644 index 0000000..9641077 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_list.go @@ -0,0 +1,39 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListComputes, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + listComputes := &ListComputes{} + + if includeDeleted, ok := d.GetOk("includedeleted"); ok { + urlValues.Add("includeDeleted", strconv.FormatBool(includeDeleted.(bool))) + } + if page, ok := d.GetOk("page"); ok { + urlValues.Add("page", strconv.Itoa(page.(int))) + } + if size, ok := d.GetOk("size"); ok { + urlValues.Add("size", strconv.Itoa(size.(int))) + } + + listComputesRaw, err := c.DecortAPICall(ctx, "POST", ComputeListAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(listComputesRaw), &listComputes) + if err != nil { + return nil, err + } + return *listComputes, nil + +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_pfw_list.go b/internal/service/cloudapi/kvmvm/utility_compute_pfw_list.go new file mode 100644 index 0000000..ff8f617 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_pfw_list.go @@ -0,0 +1,30 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputePfwListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListPFWs, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + listPFWs := &ListPFWs{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computePfwListRaw, err := c.DecortAPICall(ctx, "POST", ComputePfwListAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(computePfwListRaw), &listPFWs) + if err != nil { + return nil, err + } + return *listPFWs, err + +} diff --git a/internal/service/cloudapi/kvmvm/utility_compute_user_list.go b/internal/service/cloudapi/kvmvm/utility_compute_user_list.go new file mode 100644 index 0000000..594783f --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_compute_user_list.go @@ -0,0 +1,28 @@ +package kvmvm + +import ( + "context" + "encoding/json" + 
"net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityComputeUserListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordACL, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + userList := &RecordACL{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computeUserListRaw, err := c.DecortAPICall(ctx, "POST", ComputeUserListAPI, urlValues) + if err != nil { + return *userList, err + } + err = json.Unmarshal([]byte(computeUserListRaw), &userList) + if err != nil { + return *userList, err + } + return *userList, err +} diff --git a/internal/service/cloudapi/kvmvm/utility_data_source_compute.go b/internal/service/cloudapi/kvmvm/utility_data_source_compute.go new file mode 100644 index 0000000..ce743f0 --- /dev/null +++ b/internal/service/cloudapi/kvmvm/utility_data_source_compute.go @@ -0,0 +1,29 @@ +package kvmvm + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityDataComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordCompute, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + compute := &RecordCompute{} + + urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int))) + computeRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues) + if err != nil { + return *compute, err + } + + err = json.Unmarshal([]byte(computeRaw), &compute) + if err != nil { + return *compute, err + } + return *compute, nil +} diff --git a/internal/service/cloudapi/rg/api.go b/internal/service/cloudapi/rg/api.go index 4754be0..b18f768 100644 --- a/internal/service/cloudapi/rg/api.go +++ b/internal/service/cloudapi/rg/api.go @@ -32,9 +32,26 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki package rg -const ResgroupCreateAPI = "/restmachine/cloudapi/rg/create" -const ResgroupUpdateAPI = "/restmachine/cloudapi/rg/update" -const ResgroupListAPI = "/restmachine/cloudapi/rg/list" -const ResgroupGetAPI = "/restmachine/cloudapi/rg/get" -const ResgroupDeleteAPI = "/restmachine/cloudapi/rg/delete" -const RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes" +const ( + ResgroupCreateAPI = "/restmachine/cloudapi/rg/create" + ResgroupUpdateAPI = "/restmachine/cloudapi/rg/update" + ResgroupListAPI = "/restmachine/cloudapi/rg/list" + ResgroupListDeletedAPI = "/restmachine/cloudapi/rg/listDeleted" + ResgroupListPfwAPI = "/restmachine/cloudapi/rg/listPFW" + ResgroupGetAPI = "/restmachine/cloudapi/rg/get" + ResgroupListVinsAPI = "/restmachine/cloudapi/rg/listVins" + ResgroupListLbAPI = "/restmachine/cloudapi/rg/listLb" + ResgroupDeleteAPI = "/restmachine/cloudapi/rg/delete" + RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes" + RgAffinityGroupComputesAPI = "/restmachine/cloudapi/rg/affinityGroupComputes" + RgAffinityGroupsGetAPI = "/restmachine/cloudapi/rg/affinityGroupsGet" + RgAffinityGroupsListAPI = "/restmachine/cloudapi/rg/affinityGroupsList" + RgAuditsAPI = "/restmachine/cloudapi/rg/audits" + RgEnableAPI = "/restmachine/cloudapi/rg/enable" + RgDisableAPI = "/restmachine/cloudapi/rg/disable" + ResgroupUsageAPI = "/restmachine/cloudapi/rg/usage" + RgAccessGrantAPI = "/restmachine/cloudapi/rg/accessGrant" + RgAccessRevokeAPI = 
"/restmachine/cloudapi/rg/accessRevoke" + RgSetDefNetAPI = "/restmachine/cloudapi/rg/setDefNet" + RgRestoreAPI = "/restmachine/cloudapi/rg/restore" +) diff --git a/internal/service/cloudapi/rg/data_source_rg.go b/internal/service/cloudapi/rg/data_source_rg.go index 0b87630..3740ba3 100644 --- a/internal/service/cloudapi/rg/data_source_rg.go +++ b/internal/service/cloudapi/rg/data_source_rg.go @@ -34,257 +34,382 @@ package rg import ( "context" - "encoding/json" - "net/url" "strconv" "github.com/rudecs/terraform-provider-decort/internal/constants" - "github.com/rudecs/terraform-provider-decort/internal/controller" - - // "net/url" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) { - c := m.(*controller.ControllerCfg) - urlValues := &url.Values{} - rgData := &ResgroupGetResp{} - - urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) - rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues) - if err != nil { - return nil, err +func sepsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_name": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, } - err = json.Unmarshal([]byte(rgRaw), rgData) - if err != nil { - return nil, err - } - return rgData, nil + return res } -func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - rg, err := utilityDataResgroupCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") // ensure ID is empty in this case - return diag.FromErr(err) +func resourcesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "current": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "reserved": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeFloat, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "seps": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: 
schema.TypeString, + Computed: true, + }, + "map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, } - return diag.FromErr(flattenDataResgroup(d, *rg)) + + return res } -func DataSourceResgroup() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, +func aclSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "explicit": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "right": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "user_group_id": { + Type: schema.TypeString, + Computed: true, + }, + } - ReadContext: dataSourceResgroupRead, + return res +} - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, +func resourceLimitsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "cu_c": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_d": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_i": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_m": { + Type: schema.TypeFloat, + Computed: true, + }, + "cu_np": { + Type: schema.TypeFloat, + Computed: true, }, + "gpu_units": { + Type: schema.TypeFloat, + Computed: true, + }, + } - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of the resource group. Names are case sensitive and unique within the context of an account.", - }, + return res +} - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the resource group. 
If this ID is specified, then resource group name is ignored.", - }, +func dataSourceRgSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + }, + "reason": { + Type: schema.TypeString, + Optional: true, + }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account, which this resource group belongs to.", + "resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: resourcesSchemaMake(), }, - - "account_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the account, which this resource group belongs to.", + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), }, - - "description": { - Type: schema.TypeString, - Computed: true, - Description: "User-defined text description of this resource group.", + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_type": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dirty": { + Type: schema.TypeBool, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "register_computes": { + Type: schema.TypeBool, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: resourceLimitsSchemaMake(), }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "Unique ID of the grid, where this resource group is deployed.", + }, + "secret": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - "quota": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: quotaRgSubresourceSchemaMake(), // this is a dictionary - }, - Description: "Quota settings for this resource group.", + }, + "computes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, - - "def_net_type": { - Type: schema.TypeString, - Computed: true, - Description: "Type of the default network for this resource group.", + }, + "res_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the default network for this resource group (if any).", + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, + }, + } + return res +} - "resources": { - Type: schema.TypeList, - 
Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "current": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, +func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rg, err := utilityDataResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") // ensure ID is empty in this case + return diag.FromErr(err) + } + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + flattenRg(d, *rg) + return nil +} - "status": { - Type: schema.TypeString, - Computed: true, - Description: "Current status of this resource group.", - }, +func DataSourceResgroup() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, - "vins": { - Type: schema.TypeList, // this is a list of ints - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of VINs deployed in this resource group.", - }, + ReadContext: dataSourceResgroupRead, - "vms": { - Type: schema.TypeList, //t his is a list of ints - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of computes deployed in this resource group.", - }, + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, }, + + Schema: dataSourceRgSchemaMake(), } } diff --git a/internal/service/cloudapi/rg/data_source_rg_affinity_group_computes.go b/internal/service/cloudapi/rg/data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..5b9868a --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_affinity_group_computes.go @@ -0,0 +1,108 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgComputes, err := utilityRgAffinityGroupComputesCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgAffinityGroupComputes(rgComputes)) + return nil +} + +func dataSourceRgAffinityGroupComputesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "affinity_group": { + Type: schema.TypeString, + Required: true, + Description: "Affinity group label", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "other_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "other_node_indirect": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "other_node_indirect_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "other_node_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "same_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "same_node_soft": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgAffinityGroupComputes() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupComputesRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupComputesSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_affinity_groups_get.go b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..316dd87 --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_get.go @@ -0,0 +1,60 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + computes, err := utilityRgAffinityGroupsGetCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("ids", computes) + return nil +} + +func dataSourceRgAffinityGroupsGetSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "affinity_group": { + Type: schema.TypeString, + Required: true, + Description: "Affinity group label", + }, + + "ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + } + + return res +} + +func DataSourceRgAffinityGroupsGet() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupsGetRead, + + 
Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupsGetSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..2c94b5a --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go @@ -0,0 +1,67 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + list, err := utilityRgAffinityGroupsListCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("affinity_groups", flattenRgListGroups(list)) + return nil +} + +func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "affinity_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Computed: true, + }, + "ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgAffinityGroupsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAffinityGroupsListSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_audits.go b/internal/service/cloudapi/rg/data_source_rg_audits.go new file mode 100644 index 0000000..61e4bdb --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_audits.go @@ -0,0 +1,77 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgAudits, err := utilityRgAuditsCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgAudits(rgAudits)) + + return nil +} + +func dataSourceRgAuditsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "call": { + Type: schema.TypeString, + Computed: true, + }, + "responsetime": { + Type: schema.TypeFloat, + Computed: true, + }, + "statuscode": { + Type: schema.TypeInt, + Computed: true, + }, + "timestamp": { + Type: schema.TypeFloat, + Computed: true, + }, + "user": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgAudits() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAuditsRead, + + 
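+ // In terraform-plugin-sdk v2, the Default timeout acts as a fallback for any operation that has no explicit value of its own; Read is set separately here.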
Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgAuditsSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_list.go b/internal/service/cloudapi/rg/data_source_rg_list.go index cc7b3be..1a87533 100644 --- a/internal/service/cloudapi/rg/data_source_rg_list.go +++ b/internal/service/cloudapi/rg/data_source_rg_list.go @@ -41,73 +41,6 @@ import ( "github.com/rudecs/terraform-provider-decort/internal/constants" ) -func flattenRgList(rgl ResgroupListResp) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, rg := range rgl { - temp := map[string]interface{}{ - "account_id": rg.AccountID, - "account_name": rg.AccountName, - "acl": flattenRgAcl(rg.ACLs), - "created_by": rg.CreatedBy, - "created_time": rg.CreatedTime, - "def_net_id": rg.DefaultNetID, - "def_net_type": rg.DefaultNetType, - "deleted_by": rg.DeletedBy, - "deleted_time": rg.DeletedTime, - "desc": rg.Decsription, - "gid": rg.GridID, - "guid": rg.GUID, - "rg_id": rg.ID, - "lock_status": rg.LockStatus, - "milestones": rg.Milestones, - "name": rg.Name, - "register_computes": rg.RegisterComputes, - "resource_limits": flattenRgResourceLimits(rg.ResourceLimits), - "secret": rg.Secret, - "status": rg.Status, - "updated_by": rg.UpdatedBy, - "updated_time": rg.UpdatedTime, - "vins": rg.Vins, - "vms": rg.Computes, - } - res = append(res, temp) - } - return res - -} - -func flattenRgAcl(rgAcls []AccountAclRecord) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, rgAcl := range rgAcls { - temp := map[string]interface{}{ - "explicit": rgAcl.IsExplicit, - "guid": rgAcl.Guid, - "right": rgAcl.Rights, - "status": rgAcl.Status, - "type": rgAcl.Type, - "user_group_id": rgAcl.UgroupID, - } - res = append(res, temp) - } - return res -} - -func flattenRgResourceLimits(rl ResourceLimits) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "cu_c": rl.CUC, - "cu_d": rl.CUD, - "cu_i": rl.CUI, - "cu_m": rl.CUM, - "cu_np": rl.CUNP, - "gpu_units": rl.GpuUnits, - } - res = append(res, temp) - - return res - -} - func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { rgList, err := utilityRgListCheckPresence(ctx, d, m) if err != nil { @@ -139,11 +72,19 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema { Optional: true, Description: "Page size", }, + "items": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "account_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), + }, + }, "account_id": { Type: schema.TypeInt, Computed: true, @@ -156,32 +97,7 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, + Schema: aclSchemaMake(), }, }, "created_by": { @@ -212,6 +128,10 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema { Type: schema.TypeString, Computed: true, }, + "dirty": { + Type: 
schema.TypeBool, + Computed: true, + }, "gid": { Type: schema.TypeInt, Computed: true, @@ -244,32 +164,7 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, + Schema: resourceLimitsSchemaMake(), }, }, "secret": { @@ -302,6 +197,20 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, }, }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, }, }, diff --git a/internal/service/cloudapi/rg/data_source_rg_list_computes.go b/internal/service/cloudapi/rg/data_source_rg_list_computes.go new file mode 100644 index 0000000..19dca61 --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_list_computes.go @@ -0,0 +1,193 @@ +package rg + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listComputes, err := utilityRgListComputesCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenRgListComputes(listComputes)) + return nil +} + +func rulesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "key": { + Type: schema.TypeString, + Computed: true, + }, + "mode": { + Type: schema.TypeString, + Computed: true, + }, + "policy": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + } + + return res + +} + +func dataSourceRgListComputesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "reason for action", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_label": { + Type: schema.TypeString, + Computed: true, + }, + "affinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: rulesSchemaMake(), + }, + }, + "affinity_weight": { + Type: schema.TypeInt, + Computed: true, + }, + "antiaffinity_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: rulesSchemaMake(), + }, + }, + "cpus": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + 
Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, + "registered": { + Type: schema.TypeBool, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "total_disks_size": { + Type: schema.TypeInt, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "user_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "vins_connected": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgListComputes() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListComputesRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListComputesSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_list_deleted.go b/internal/service/cloudapi/rg/data_source_rg_list_deleted.go new file mode 100644 index 0000000..ea2113a --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_list_deleted.go @@ -0,0 +1,196 @@ +package rg + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgList, err := utilityRgListDeletedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenRgList(rgList)) + + return nil +} + +func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", + }, + "size": { + Type: schema.TypeInt, + Optional: true, + Description: "Page size", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), + }, + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), + }, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_id": { + Type: schema.TypeInt, + Computed: true, + }, + "def_net_type": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dirty": { + Type: schema.TypeBool, + Computed: true, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + 
Type: schema.TypeInt, + Computed: true, + }, + "rg_id": { + Type: schema.TypeInt, + Computed: true, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "register_computes": { + Type: schema.TypeBool, + Computed: true, + }, + "resource_limits": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: resourceLimitsSchemaMake(), + }, + }, + "secret": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "vms": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "resource_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + } + return res +} + +func DataSourceRgListDeleted() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListDeletedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListDeletedSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_list_lb.go b/internal/service/cloudapi/rg/data_source_rg_list_lb.go new file mode 100644 index 0000000..a162ba0 --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_list_lb.go @@ -0,0 +1,357 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListLbRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listLb, err := utilityRgListLbCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgListLb(listLb)) + return nil +} + +func serversSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "check": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "server_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: serverSettingsSchemaMake(), + }, + }, + } + return res +} + +func serverSettingsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "inter": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "down_inter": { + Type: schema.TypeInt, + Computed: true, + }, + "rise": { + Type: schema.TypeInt, + Computed: true, + }, + "fall": { + Type: schema.TypeInt, + Computed: true, + }, + "slow_start": { + Type: schema.TypeInt, + Computed: true, + }, + "max_conn": { + Type: schema.TypeInt, + Computed: true, + }, + "max_queue": { + Type: schema.TypeInt, + 
Computed: true, + }, + "weight": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func backendsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "server_default_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: serverSettingsSchemaMake(), + }, + }, + "servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: serversSchemaMake(), + }, + }, + } + return res +} + +func bindingsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "address": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func frontendsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "backend": { + Type: schema.TypeString, + Computed: true, + }, + "bindings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: bindingsSchemaMake(), + }, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + } + + return res +} + +func nodeSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "backend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "compute_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontend_ip": { + Type: schema.TypeString, + Computed: true, + }, + "guid": { + Type: schema.TypeString, + Computed: true, + }, + "mgmt_ip": { + Type: schema.TypeString, + Computed: true, + }, + "network_id": { + Type: schema.TypeInt, + Computed: true, + }, + } + return res +} + +func dataSourceRgListLbSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ha_mode": { + Type: schema.TypeBool, + Computed: true, + }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), + }, + }, + "backends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: backendsSchemaMake(), + }, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "desc": { + Type: schema.TypeString, + Computed: true, + }, + "dp_api_user": { + Type: schema.TypeString, + Computed: true, + }, + "extnet_id": { + Type: schema.TypeInt, + Computed: true, + }, + "frontends": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: frontendsSchemaMake(), + }, + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, 
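+ // primary_node and secondary_node reuse the same nested schema (nodeSchemaMake above); the secondary node is presumably only populated when ha_mode is set.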
+ "primary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: nodeSchemaMake(), + }, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "secondary_node": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: nodeSchemaMake(), + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + "vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgListLb() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListLbRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListLbSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_list_pfw.go b/internal/service/cloudapi/rg/data_source_rg_list_pfw.go new file mode 100644 index 0000000..e7372bf --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_list_pfw.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListPfwRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listPfw, err := utilityRgListPfwCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgListPfw(listPfw)) + return nil +} + +func dataSourceRgListPfwSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_port_end": { + Type: schema.TypeInt, + Computed: true, + }, + "public_port_start": { + Type: schema.TypeInt, + Computed: true, + }, + "vm_id": { + Type: schema.TypeInt, + Computed: true, + }, + "vm_ip": { + Type: schema.TypeString, + Computed: true, + }, + "vm_name": { + Type: schema.TypeString, + Computed: true, + }, + "vm_port": { + Type: schema.TypeInt, + Computed: true, + }, + "vins_id": { + Type: schema.TypeInt, + Computed: true, + }, + "vins_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgListPfw() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListPfwRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListPfwSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_list_vins.go b/internal/service/cloudapi/rg/data_source_rg_list_vins.go new file mode 100644 index 0000000..6fc0bc6 --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_list_vins.go @@ -0,0 +1,126 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListVinsRead(ctx context.Context, d 
*schema.ResourceData, m interface{}) diag.Diagnostics { + listVins, err := utilityRgListVinsCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("items", flattenRgListVins(listVins)) + return nil +} + +func dataSourceRgListVinsSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the RG", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "Reason for action", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Computed: true, + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + }, + "computes": { + Type: schema.TypeInt, + Computed: true, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "external_ip": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Computed: true, + }, + "pri_vnf_dev_id": { + Type: schema.TypeInt, + Computed: true, + }, + "rg_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgListVins() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListVinsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListVinsSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/data_source_rg_usage.go b/internal/service/cloudapi/rg/data_source_rg_usage.go new file mode 100644 index 0000000..ec6c7b7 --- /dev/null +++ b/internal/service/cloudapi/rg/data_source_rg_usage.go @@ -0,0 +1,99 @@ +package rg + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceRgUsageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + usage, err := utilityDataRgUsageCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + flattenRgUsageResource(d, *usage) + return nil +} + +func dataSourceRgUsageSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "rg_id": { + Type: schema.TypeInt, + Required: true, + }, + "reason": { + Type: schema.TypeString, + Optional: true, + }, + + "cpu": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size": { + Type: schema.TypeInt, + Computed: true, + }, + "disk_size_max": { + Type: schema.TypeInt, + Computed: true, + }, + "extips": { + Type: schema.TypeInt, + Computed: true, + }, + "exttraffic": { + Type: schema.TypeInt, + Computed: true, + }, + "gpu": { + Type: schema.TypeInt, + Computed: true, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + }, 
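+ // The "seps" attribute below is filled by flattenRgSeps: one element per SEP, with each pool's DiskUsage JSON-encoded into the "map" values. A hypothetical element, assuming a SEP with ID "1" and a pool named "vmstor", would look like:
+ //   { sep_id = "1", map = { vmstor = "{\"disksize\":10,\"disksizemax\":0}" } }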
+ "seps": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sep_id": { + Type: schema.TypeString, + Computed: true, + }, + "map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + } + + return res +} + +func DataSourceRgUsage() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgUsageRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgUsageSchemaMake(), + } +} diff --git a/internal/service/cloudapi/rg/flattens.go b/internal/service/cloudapi/rg/flattens.go index b6f8aa2..b08d431 100644 --- a/internal/service/cloudapi/rg/flattens.go +++ b/internal/service/cloudapi/rg/flattens.go @@ -33,13 +33,14 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki package rg import ( + "encoding/json" "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" log "github.com/sirupsen/logrus" ) -func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} { +func flattenAccountSeps(seps map[string]map[string]DiskUsage) []map[string]interface{} { res := make([]map[string]interface{}, 0) for sepKey, sepVal := range seps { for dataKey, dataVal := range sepVal { @@ -52,6 +53,7 @@ func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]int res = append(res, temp) } } + return res } @@ -59,14 +61,15 @@ func flattenAccResource(r Resource) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ "cpu": r.CPU, - "disksize": r.Disksize, - "extips": r.Extips, - "exttraffic": r.Exttraffic, + "disksize": r.DiskSize, + "extips": r.ExtIPs, + "exttraffic": r.ExtTraffic, "gpu": r.GPU, "ram": r.RAM, "seps": flattenAccountSeps(r.SEPs), } res = append(res, temp) + return res } @@ -80,36 +83,7 @@ func flattenRgResources(r Resources) []map[string]interface{} { return res } -func flattenDataResgroup(d *schema.ResourceData, details ResgroupGetResp) error { - // NOTE: this function modifies ResourceData argument - as such it should never be called - // from resourceRsgroupExists(...) method - // log.Debugf("%s", rg_facts) - - log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d", - details.Name, details.ID, details.AccountID) - - d.SetId(fmt.Sprintf("%d", details.ID)) - d.Set("rg_id", details.ID) - d.Set("name", details.Name) - d.Set("account_name", details.AccountName) - d.Set("account_id", details.AccountID) - d.Set("gid", details.GridID) - d.Set("description", details.Desc) - d.Set("status", details.Status) - d.Set("def_net_type", details.DefaultNetType) - d.Set("def_net_id", details.DefaultNetID) - d.Set("resources", flattenRgResources(details.Resources)) - d.Set("vins", details.Vins) - d.Set("vms", details.Computes) - log.Debugf("flattenResgroup: calling flattenQuota()") - if err := d.Set("quota", parseQuota(details.Quota)); err != nil { - return err - } - - return nil -} - -func flattenResgroup(d *schema.ResourceData, details ResgroupGetResp) error { +func flattenResgroup(d *schema.ResourceData, details RecordResourceGroup) error { // NOTE: this function modifies ResourceData argument - as such it should never be called // from resourceRsgroupExists(...) 
method // log.Debugf("%s", rg_facts) @@ -124,22 +98,482 @@ func flattenResgroup(d *schema.ResourceData, details ResgroupGetResp) error { details.Name, details.ID, details.AccountID) d.SetId(fmt.Sprintf("%d", details.ID)) - d.Set("rg_id", details.ID) + + d.Set("account_id", details.AccountID) + d.Set("gid", details.GID) + d.Set("def_net_type", details.DefNetType) d.Set("name", details.Name) + + d.Set("resources", flattenRgResource(details.Resources)) d.Set("account_name", details.AccountName) - d.Set("account_id", details.AccountID) - d.Set("gid", details.GridID) - d.Set("description", details.Desc) - d.Set("status", details.Status) - d.Set("def_net_type", details.DefaultNetType) - d.Set("def_net_id", details.DefaultNetID) - d.Set("resources", flattenRgResources(details.Resources)) - d.Set("vins", details.Vins) + d.Set("acl", flattenRgAcl(details.ACL)) d.Set("vms", details.Computes) - log.Debugf("flattenResgroup: calling flattenQuota()") - if err := d.Set("quota", parseQuota(details.Quota)); err != nil { - return err - } + d.Set("created_by", details.CreatedBy) + d.Set("created_time", details.CreatedTime) + d.Set("def_net_id", details.DefNetID) + d.Set("deleted_by", details.DeletedBy) + d.Set("deleted_time", details.DeletedTime) + d.Set("description", details.Description) + d.Set("dirty", details.Dirty) + d.Set("guid", details.GUID) + d.Set("rg_id", details.ID) + d.Set("lock_status", details.LockStatus) + d.Set("milestones", details.Milestones) + d.Set("register_computes", details.RegisterComputes) + d.Set("res_types", details.ResTypes) + d.Set("secret", details.Secret) + d.Set("status", details.Status) + d.Set("updated_by", details.UpdatedBy) + d.Set("updated_time", details.UpdatedTime) + d.Set("uniq_pools", details.UniqPools) + d.Set("vins", details.VINS) return nil } + +func flattenRgSeps(seps map[string]map[string]DiskUsage) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for sepKey, sepVal := range seps { + SepMap := map[string]interface{}{} + for dataKey, dataVal := range sepVal { + val, _ := json.Marshal(dataVal) + SepMap[dataKey] = string(val) + } + temp := map[string]interface{}{ + "sep_id": sepKey, + "map": SepMap, + } + res = append(res, temp) + } + return res +} + +func flattenResource(resource Resource) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + + temp := map[string]interface{}{ + "cpu": resource.CPU, + "disk_size": resource.DiskSize, + "disk_size_max": resource.DiskSizeMax, + "extips": resource.ExtIPs, + "exttraffic": resource.ExtTraffic, + "gpu": resource.GPU, + "ram": resource.RAM, + "seps": flattenRgSeps(resource.SEPs), + } + + res = append(res, temp) + + return res +} + +func flattenRgResource(itemResource Resources) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "current": flattenResource(itemResource.Current), + "reserved": flattenResource(itemResource.Reserved), + } + + res = append(res, temp) + return res +} + +func flattenRg(d *schema.ResourceData, itemRg RecordResourceGroup) { + d.Set("resources", flattenRgResource(itemRg.Resources)) + d.Set("account_id", itemRg.AccountID) + d.Set("account_name", itemRg.AccountName) + d.Set("acl", flattenRgAcl(itemRg.ACL)) + d.Set("computes", itemRg.Computes) + d.Set("created_by", itemRg.CreatedBy) + d.Set("created_time", itemRg.CreatedTime) + d.Set("def_net_id", itemRg.DefNetID) + d.Set("def_net_type", itemRg.DefNetType) + d.Set("deleted_by", itemRg.DeletedBy) + d.Set("deleted_time", itemRg.DeletedTime) + d.Set("desc", 
itemRg.Description) + d.Set("dirty", itemRg.Dirty) + d.Set("gid", itemRg.GID) + d.Set("guid", itemRg.GUID) + d.Set("rg_id", itemRg.ID) + d.Set("lock_status", itemRg.LockStatus) + d.Set("milestones", itemRg.Milestones) + d.Set("name", itemRg.Name) + d.Set("register_computes", itemRg.RegisterComputes) + d.Set("res_types", itemRg.ResTypes) + d.Set("resource_limits", flattenRgResourceLimits(itemRg.ResourceLimits)) + d.Set("secret", itemRg.Secret) + d.Set("status", itemRg.Status) + d.Set("updated_by", itemRg.UpdatedBy) + d.Set("updated_time", itemRg.UpdatedTime) + d.Set("uniq_pools", itemRg.UniqPools) + d.Set("vins", itemRg.VINS) +} + +func flattenRgAudits(rgAudits ListAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rgAudit := range rgAudits { + temp := map[string]interface{}{ + "call": rgAudit.Call, + "responsetime": rgAudit.ResponseTime, + "statuscode": rgAudit.StatusCode, + "timestamp": rgAudit.Timestamp, + "user": rgAudit.User, + } + + res = append(res, temp) + } + + return res +} + +func flattenRgList(rgl ListResourceGroups) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rg := range rgl { + temp := map[string]interface{}{ + "account_acl": flattenRgAcl(rg.ACL), + "account_id": rg.AccountID, + "account_name": rg.AccountName, + "acl": flattenRgAcl(rg.ACL), + "created_by": rg.CreatedBy, + "created_time": rg.CreatedTime, + "def_net_id": rg.DefNetID, + "def_net_type": rg.DefNetType, + "deleted_by": rg.DeletedBy, + "deleted_time": rg.DeletedTime, + "desc": rg.Description, + "dirty": rg.Dirty, + "gid": rg.GID, + "guid": rg.GUID, + "rg_id": rg.ID, + "lock_status": rg.LockStatus, + "milestones": rg.Milestones, + "name": rg.Name, + "register_computes": rg.RegisterComputes, + "resource_limits": flattenRgResourceLimits(rg.ResourceLimits), + "secret": rg.Secret, + "status": rg.Status, + "updated_by": rg.UpdatedBy, + "updated_time": rg.UpdatedTime, + "vins": rg.VINS, + "vms": rg.Computes, + "resource_types": rg.ResTypes, + "uniq_pools": rg.UniqPools, + } + res = append(res, temp) + } + return res + +} + +func flattenRgAcl(rgAcls ListACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rgAcl := range rgAcls { + temp := map[string]interface{}{ + "explicit": rgAcl.Explicit, + "guid": rgAcl.GUID, + "right": rgAcl.Right, + "status": rgAcl.Status, + "type": rgAcl.Type, + "user_group_id": rgAcl.UserGroupID, + } + res = append(res, temp) + } + return res +} + +func flattenRgResourceLimits(rl ResourceLimits) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "cu_c": rl.CUC, + "cu_d": rl.CUD, + "cu_i": rl.CUI, + "cu_m": rl.CUM, + "cu_np": rl.CUNP, + "gpu_units": rl.GpuUnits, + } + res = append(res, temp) + + return res + +} + +func flattenRules(list ListRules) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, rule := range list { + temp := map[string]interface{}{ + "guid": rule.GUID, + "key": rule.Key, + "mode": rule.Mode, + "policy": rule.Policy, + "topology": rule.Topology, + "value": rule.Value, + } + + res = append(res, temp) + } + + return res +} + +func flattenRgListComputes(lc ListComputes) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, compute := range lc { + temp := map[string]interface{}{ + "account_id": compute.AccountID, + "account_name": compute.AccountName, + "affinity_label": compute.AffinityLabel, + "affinity_rules": flattenRules(compute.AffinityRules), + "affinity_weight": 
compute.AffinityWeight, + "antiaffinity_rules": flattenRules(compute.AntiAffinityRules), + "cpus": compute.CPUs, + "created_by": compute.CreatedBy, + "created_time": compute.CreatedTime, + "deleted_by": compute.DeletedBy, + "deleted_time": compute.DeletedTime, + "id": compute.ID, + "name": compute.Name, + "ram": compute.RAM, + "registered": compute.Registered, + "rg_name": compute.RGName, + "status": compute.Status, + "tech_status": compute.TechStatus, + "total_disks_size": compute.TotalDisksSize, + "updated_by": compute.UpdatedBy, + "updated_time": compute.UpdatedTime, + "user_managed": compute.UserManaged, + "vins_connected": compute.VINSConnected, + } + + res = append(res, temp) + } + + return res +} + +func flattenServerSettings(settings ServerSettings) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "inter": settings.Inter, + "guid": settings.GUID, + "down_inter": settings.DownInter, + "rise": settings.Rise, + "fall": settings.Fall, + "slow_start": settings.SlowStart, + "max_conn": settings.MaxConn, + "max_queue": settings.MaxQueue, + "weight": settings.Weight, + } + res = append(res, temp) + return res +} + +func flattenListServers(list ListServers) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, serv := range list { + temp := map[string]interface{}{ + "address": serv.Address, + "check": serv.Check, + "guid": serv.GUID, + "name": serv.Name, + "port": serv.Port, + "server_settings": flattenServerSettings(serv.ServerSettings), + } + res = append(res, temp) + } + + return res +} + +func flattenBackends(b ListBackends) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, item := range b { + temp := map[string]interface{}{ + "algorithm": item.Algorithm, + "guid": item.GUID, + "name": item.Name, + "server_default_settings": flattenServerSettings(item.ServerDefaultSettings), + "servers": flattenListServers(item.Servers), + } + res = append(res, temp) + } + return res +} + +func flattenBindings(list ListBindings) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, bind := range list { + temp := map[string]interface{}{ + "address": bind.Address, + "guid": bind.GUID, + "name": bind.Name, + "port": bind.Port, + } + res = append(res, temp) + } + + return res +} + +func flattenFrontends(list ListFrontends) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, front := range list { + temp := map[string]interface{}{ + "backend": front.Backend, + "bindings": flattenBindings(front.Bindings), + "guid": front.GUID, + "name": front.Name, + } + res = append(res, temp) + } + + return res +} + +func flattenNode(node RecordNode) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "backend_ip": node.BackendIP, + "compute_id": node.ComputeID, + "frontend_ip": node.FrontendIP, + "guid": node.GUID, + "mgmt_ip": node.MGMTIP, + "network_id": node.NetworkID, + } + res = append(res, temp) + return res +} + +func flattenRgListLb(listLb ListLB) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, lb := range listLb { + temp := map[string]interface{}{ + "ha_mode": lb.HAMode, + "acl": lb.ACL, + "backends": flattenBackends(lb.Backends), + "created_by": lb.CreatedBy, + "created_time": lb.CreatedTime, + "deleted_by": lb.DeletedBy, + "deleted_time": lb.DeletedTime, + "desc": lb.Description, + "dp_api_user": lb.DPAPIUser, + "extnet_id": lb.ExtNetID, + "frontends":
flattenFrontends(lb.Frontends), + "gid": lb.GID, + "guid": lb.GUID, + "id": lb.ID, + "image_id": lb.ImageID, + "milestones": lb.Milestones, + "name": lb.Name, + "primary_node": flattenNode(lb.PrimaryNode), + "rg_name": lb.RGName, + "secondary_node": flattenNode(lb.SecondaryNode), + "status": lb.Status, + "tech_status": lb.TechStatus, + "updated_by": lb.UpdatedBy, + "updated_time": lb.UpdatedTime, + "vins_id": lb.VINSID, + } + res = append(res, temp) + } + return res +} + +func flattenRgListPfw(listPfw ListPFW) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, pfw := range listPfw { + temp := map[string]interface{}{ + "public_port_end": pfw.PublicPortEnd, + "public_port_start": pfw.PublicPortStart, + "vm_id": pfw.VMID, + "vm_ip": pfw.VMIP, + "vm_name": pfw.VMName, + "vm_port": pfw.VMPort, + "vins_id": pfw.VINSID, + "vins_name": pfw.VINSName, + } + res = append(res, temp) + } + + return res +} + +func flattenRgListVins(lv ListVINS) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, vins := range lv { + temp := map[string]interface{}{ + "account_id": vins.AccountID, + "account_name": vins.AccountName, + "computes": vins.Computes, + "created_by": vins.CreatedBy, + "created_time": vins.CreatedTime, + "deleted_by": vins.DeletedBy, + "deleted_time": vins.DeletedTime, + "external_ip": vins.ExternalIP, + "id": vins.ID, + "name": vins.Name, + "network": vins.Network, + "pri_vnf_dev_id": vins.PriVNFDevID, + "rg_name": vins.RGName, + "status": vins.Status, + "updated_by": vins.UpdatedBy, + "updated_time": vins.UpdatedTime, + } + + res = append(res, temp) + } + + return res +} + +func flattenRgAffinityGroupComputes(list ListAffinityGroupCompute) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + + for _, item := range list { + temp := map[string]interface{}{ + "compute_id": item.ComputeID, + "other_node": item.OtherNode, + "other_node_indirect": item.OtherNodeIndirect, + "other_node_indirect_soft": item.OtherNodeIndirectSoft, + "other_node_soft": item.OtherNodeSoft, + "same_node": item.SameNode, + "same_node_soft": item.SameNodeSoft, + } + res = append(res, temp) + } + + return res +} + +func flattenRgAffinityGroupsGet(list []uint64) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "items": list, + } + res = append(res, temp) + + return res +} + +func flattenRgListGroups(list map[string][]uint64) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for groupKey, groupVal := range list { + temp := map[string]interface{}{ + "label": groupKey, + "ids": groupVal, + } + res = append(res, temp) + } + + return res +} + +func flattenRgUsageResource(d *schema.ResourceData, usage Resource) { + d.Set("cpu", usage.CPU) + d.Set("disk_size", usage.DiskSize) + d.Set("disk_size_max", usage.DiskSizeMax) + d.Set("extips", usage.ExtIPs) + d.Set("exttraffic", usage.ExtTraffic) + d.Set("gpu", usage.GPU) + d.Set("ram", usage.RAM) + d.Set("seps", flattenRgSeps(usage.SEPs)) +} diff --git a/internal/service/cloudapi/rg/models.go b/internal/service/cloudapi/rg/models.go index a0fb4cc..370a73c 100644 --- a/internal/service/cloudapi/rg/models.go +++ b/internal/service/cloudapi/rg/models.go @@ -41,92 +41,6 @@ type ResourceLimits struct { GpuUnits float64 `json:"gpu_units"` } -type ResgroupRecord struct { - ACLs []AccountAclRecord `json:"acl"` - AccountID int `json:"accountId"` - AccountName string `json:"accountName"` - CreatedBy string `json:"createdBy"` - CreatedTime uint64 
`json:"createdTime"` - DefaultNetID int `json:"def_net_id"` - DefaultNetType string `json:"def_net_type"` - DeletedBy string `json:"deletedBy"` - DeletedTime int `json:"deletedTime"` - Decsription string `json:"desc"` - GridID int `json:"gid"` - GUID int `json:"guid"` - ID uint `json:"id"` - LockStatus string `json:"lockStatus"` - Milestones int `json:"milestones"` - Name string `json:"name"` - RegisterComputes bool `json:"registerComputes"` - ResourceLimits ResourceLimits `json:"resourceLimits"` - Secret string `json:"secret"` - Status string `json:"status"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime uint64 `json:"updatedTime"` - Vins []int `json:"vins"` - Computes []int `json:"vms"` -} - -type ResgroupListResp []ResgroupRecord - -type ResgroupUpdateParam struct { - RgId int `json:"rgId"` - Name string `json:"name"` - Desc string `json:"decs"` - Ram int `json:"maxMemoryCapacity"` - Disk int `json:"maxVDiskCapacity"` - Cpu int `json:"maxCPUCapacity"` - NetTraffic int `json:"maxNetworkPeerTransfer"` - Reason string `json:"reason"` -} - -type AccountAclRecord struct { - IsExplicit bool `json:"explicit"` - Guid string `json:"guid"` - Rights string `json:"right"` - Status string `json:"status"` - Type string `json:"type"` - UgroupID string `json:"userGroupId"` - CanBeDeleted bool `json:"canBeDeleted"` -} - -type ResgroupGetResp struct { - Resources Resources `json:"Resources"` - ACLs []UserAclRecord `json:"ACLs"` - //Usage UsageRecord `json:"Resources"` - AccountID int `json:"accountId"` - AccountName string `json:"accountName"` - GridID int `json:"gid"` - CreatedBy string `json:"createdBy"` - CreatedTime uint64 `json:"createdTime"` - DefaultNetID int `json:"def_net_id"` - DefaultNetType string `json:"def_net_type"` - DeletedBy string `json:"deletedBy"` - DeletedTime uint64 `json:"deletedTime"` - Desc string `json:"desc"` - ID uint `json:"id"` - LockStatus string `json:"lockStatus"` - Name string `json:"name"` - Quota QuotaRecord `json:"resourceLimits"` - Status string `json:"status"` - UpdatedBy string `json:"updatedBy"` - UpdatedTime uint64 `json:"updatedTime"` - Vins []int `json:"vins"` - Computes []int `json:"vms"` - - Ignored map[string]interface{} `json:"-"` -} - -type UserAclRecord struct { - IsExplicit bool `json:"explicit"` - Rights string `json:"right"` - Status string `json:"status"` - Type string `json:"type"` - UgroupID string `json:"userGroupId"` - // CanBeDeleted bool `json:"canBeDeleted"` -} - type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get Cpu int `json:"CU_C"` // CPU count in pcs Ram float64 `json:"CU_M"` // RAM volume in MB, it is STILL reported as FLOAT @@ -136,36 +50,700 @@ type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get GpuUnits int `json:"gpu_units"` // GPU count } -type ResourceRecord struct { // this is how actual usage is reported by /api/.../rg/get - Cpu int `json:"cpu"` - Disk float64 `json:"disksize"` - ExtIPs int `json:"extips"` - ExtTraffic int `json:"exttraffic"` - Gpu int `json:"gpu"` - Ram int `json:"ram"` -} +// Main information about audit +type ItemAudit struct { + // Call + Call string `json:"call"` -type UsageRecord struct { - Current ResourceRecord `json:"Current"` - Reserved ResourceRecord `json:"Reserved"` -} + // Response time + ResponseTime float64 `json:"responsetime"` -type ResourceSep struct { - DiskSize float64 `json:"disksize"` - DiskSizeMax int `json:"disksizemax"` + // Status code + StatusCode uint64 `json:"statuscode"` + + // Timestamp + Timestamp float64 `json:"timestamp"` 
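+ // These fields map one-to-one onto the item attributes of the rg_audits data source (call, responsetime, statuscode, timestamp, user).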
+ + // User + User string `json:"user"` } +// List of audits +type ListAudits []ItemAudit + +// Resources used type Resource struct { - CPU int `json:"cpu"` - Disksize float64 `json:"disksize"` - Extips int `json:"extips"` - Exttraffic int `json:"exttraffic"` - GPU int `json:"gpu"` - RAM int `json:"ram"` - SEPs map[string]map[string]ResourceSep `json:"seps"` + // Number of cores + CPU int64 `json:"cpu"` + + // Disk size + DiskSize float64 `json:"disksize"` + + // Max disk size + DiskSizeMax uint64 `json:"disksizemax"` + + // Number of External IPs + ExtIPs int64 `json:"extips"` + + // External traffic + ExtTraffic int64 `json:"exttraffic"` + + // Number of grafic cores + GPU int64 `json:"gpu"` + + // Number of RAM + RAM int64 `json:"ram"` + + // SEPs + SEPs map[string]map[string]DiskUsage `json:"seps"` } +// Disk usage +type DiskUsage struct { + // Disk size + DiskSize float64 `json:"disksize"` + + // Disk size max + DiskSizeMax float64 `json:"disksizemax"` +} + +// Information about resources type Resources struct { - Current Resource `json:"Current"` + // Current information about resources + Current Resource `json:"Current"` + + // Reserved information about resources Reserved Resource `json:"Reserved"` } + +// Detailed information about resource group +type RecordResourceGroup struct { + // Resources + Resources Resources `json:"Resources"` + + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Access Control List + ACL ListACL `json:"acl"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // DefNetID + DefNetID int64 `json:"def_net_id"` + + // DefNetType + DefNetType string `json:"def_net_type"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // Dirty + Dirty bool `json:"dirty"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Lock status + LockStatus string `json:"lockStatus"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // RegisterComputes + RegisterComputes bool `json:"registerComputes"` + + // Resource limits + ResourceLimits ResourceLimits `json:"resourceLimits"` + + // Secret + Secret string `json:"secret"` + + // Status + Status string `json:"status"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // List of VINS IDs + VINS []uint64 `json:"vins"` + + // List of compute IDs + Computes []uint64 `json:"vms"` + + // List of resource types + ResTypes []string `json:"resourceTypes"` + + // UniqPools + UniqPools []string `json:"uniqPools"` +} + +// Main information about Access Control List +type ItemACL struct { + // Explicit + Explicit bool `json:"explicit"` + + // GUID + GUID string `json:"guid"` + + // Right + Right string `json:"right"` + + // Status + Status string `json:"status"` + + // Type + Type string `json:"type"` + + // User group ID + UserGroupID string `json:"userGroupId"` +} + +// List ACL +type ListACL []ItemACL + +type ItemResourceGroup struct { + // + AccountACL ItemACL `json:"accountAcl"` + + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Access Control List + ACL ListACL `json:"acl"` + + // Created by + CreatedBy string 
`json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // DefNetID + DefNetID int64 `json:"def_net_id"` + + // DefNetType + DefNetType string `json:"def_net_type"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // Dirty + Dirty bool `json:"dirty"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Lock status + LockStatus string `json:"lockStatus"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // RegisterComputes + RegisterComputes bool `json:"registerComputes"` + + // Resource limits + ResourceLimits ResourceLimits `json:"resourceLimits"` + + // Secret + Secret string `json:"secret"` + + // Status + Status string `json:"status"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // List of VINS IDs + VINS []uint64 `json:"vins"` + + // List of compute IDs + Computes []uint64 `json:"vms"` + + // List of resource types + ResTypes []string `json:"resourceTypes"` + + // UniqPools + UniqPools []string `json:"uniqPools"` +} + +// List of resource groups +type ListResourceGroups []ItemResourceGroup + +// Main information about affinity rule +type ItemRule struct { + // GUID + GUID string `json:"guid"` + + // Key + Key string `json:"key"` + + // Mode + Mode string `json:"mode"` + + // Policy + Policy string `json:"policy"` + + // Topology + Topology string `json:"topology"` + + // Value + Value string `json:"value"` +} + +// List rules +type ListRules []ItemRule + +// Main information about compute +type ItemCompute struct { + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Affinity label + AffinityLabel string `json:"affinityLabel"` + + // List affinity rules + AffinityRules ListRules `json:"affinityRules"` + + // Affinity weight + AffinityWeight uint64 `json:"affinityWeight"` + + // Anti affinity rules + AntiAffinityRules ListRules `json:"antiAffinityRules"` + + // Number of CPU + CPUs uint64 `json:"cpus"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // ID + ID uint64 `json:"id"` + + // Name + Name string `json:"name"` + + // Number of RAM + RAM uint64 `json:"ram"` + + // Registered + Registered bool `json:"registered"` + + // Resource group ID + RGID uint64 `json:"rgId"` + + // Resource group name + RGName string `json:"rgName"` + + // Status + Status string `json:"status"` + + // Tech status + TechStatus string `json:"techStatus"` + + // Total disks size + TotalDisksSize uint64 `json:"totalDisksSize"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // User managed + UserManaged bool `json:"userManaged"` + + // VINS connected + VINSConnected uint64 `json:"vinsConnected"` +} + +// List computes +type ListComputes []ItemCompute + +// Main information about port forward +type ItemPFW struct { + // Public port end + PublicPortEnd uint64 `json:"Public Port End"` + + // Public port start + PublicPortStart uint64 `json:"Public Port Start"` + + // Virtual machine ID + VMID uint64 `json:"VM ID"` + + // Virtual machine 
IP + VMIP string `json:"VM IP"` + + // Virtual machine name + VMName string `json:"VM Name"` + + // Virtual machine port + VMPort uint64 `json:"VM Port"` + + // VINS ID + VINSID uint64 `json:"ViNS ID"` + + // VINS name + VINSName string `json:"ViNS Name"` +} + +// List PFWs +type ListPFW []ItemPFW + +// Main information about VINS +type ItemVINS struct { + // Account ID + AccountID uint64 `json:"accountId"` + + // Account name + AccountName string `json:"accountName"` + + // Computes + Computes uint64 `json:"computes"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // External IP + ExternalIP string `json:"externalIP"` + + // ID + ID uint64 `json:"id"` + + // Name + Name string `json:"name"` + + // Network + Network string `json:"network"` + + // PriVNFDev ID + PriVNFDevID uint64 `json:"priVnfDevId"` + + // Resource group ID + RGID uint64 `json:"rgId"` + + // Resource group name + RGName string `json:"rgName"` + + // Status + Status string `json:"status"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` +} + +// List VINSes +type ListVINS []ItemVINS + +// Server settings +type ServerSettings struct { + // Inter + Inter uint64 `json:"inter"` + + // GUID + GUID string `json:"guid"` + + // Down inter + DownInter uint64 `json:"downinter"` + + // Rise + Rise uint64 `json:"rise"` + + // Fall + Fall uint64 `json:"fall"` + + // Slow start + SlowStart uint64 `json:"slowstart"` + + // Max connections + MaxConn uint64 `json:"maxconn"` + + // Max queue + MaxQueue uint64 `json:"maxqueue"` + + // Weight + Weight uint64 `json:"weight"` +} + +// Main information about server +type ItemServer struct { + // Address + Address string `json:"address"` + + // Check + Check string `json:"check"` + + // GUID + GUID string `json:"guid"` + + // Name + Name string `json:"name"` + + // Port + Port uint64 `json:"port"` + + // Server settings + ServerSettings ServerSettings `json:"serverSettings"` +} + +// List of servers +type ListServers []ItemServer + +// Main information about backend +type ItemBackend struct { + // Algorithm + Algorithm string `json:"algorithm"` + + // GUID + GUID string `json:"guid"` + + // Name + Name string `json:"name"` + + // Server settings + ServerDefaultSettings ServerSettings `json:"serverDefaultSettings"` + + // List of servers + Servers ListServers `json:"servers"` +} + +// List of backends +type ListBackends []ItemBackend + +// Main information of binding +type ItemBinding struct { + // Address + Address string `json:"address"` + + // GUID + GUID string `json:"guid"` + + // Name + Name string `json:"name"` + + // Port + Port uint64 `json:"port"` +} + +// List of bindings +type ListBindings []ItemBinding + +// Main information about frontend +type ItemFrontend struct { + // Backend + Backend string `json:"backend"` + + // List of bindings + Bindings ListBindings `json:"bindings"` + + // GUID + GUID string `json:"guid"` + + // Name + Name string `json:"name"` +} + +// List of frontends +type ListFrontends []ItemFrontend + +// Main information about node +type RecordNode struct { + // Backend IP + BackendIP string `json:"backendIp"` + + // Compute ID + ComputeID uint64 `json:"computeId"` + + // Frontend IP + FrontendIP string `json:"frontendIp"` + + // GUID + GUID string `json:"guid"` + + // MGMT IP + MGMTIP string `json:"mgmtIp"` 
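An aside on ItemPFW above: the port-forwarding list endpoint returns JSON keys that contain spaces ("Public Port Start", "VM ID", "ViNS ID"), and the struct tags map them directly. A minimal, self-contained sketch with a hypothetical payload (the values are illustrative, not taken from a real controller):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of ItemPFW from the models above, kept only to show that
// encoding/json resolves the space-containing keys via the struct tags.
type itemPFW struct {
	PublicPortStart uint64 `json:"Public Port Start"`
	PublicPortEnd   uint64 `json:"Public Port End"`
	VMID            uint64 `json:"VM ID"`
	VMPort          uint64 `json:"VM Port"`
	VINSID          uint64 `json:"ViNS ID"`
}

func main() {
	// Hypothetical sample payload.
	raw := `{"Public Port Start": 3389, "Public Port End": 3389, "VM ID": 10524, "VM Port": 3389, "ViNS ID": 777}`

	var pfw itemPFW
	if err := json.Unmarshal([]byte(raw), &pfw); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", pfw) // {PublicPortStart:3389 PublicPortEnd:3389 VMID:10524 VMPort:3389 VINSID:777}
}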
+ + // Network ID + NetworkID uint64 `json:"networkId"` +} + +// Main information about load balancer +type ItemLB struct { + // HAMode + HAMode bool `json:"HAmode"` + + // List ACL + ACL ListACL `json:"acl"` + + // List backends + Backends ListBackends `json:"backends"` + + // Created by + CreatedBy string `json:"createdBy"` + + // Created time + CreatedTime uint64 `json:"createdTime"` + + // Deleted by + DeletedBy string `json:"deletedBy"` + + // Deleted time + DeletedTime uint64 `json:"deletedTime"` + + // Description + Description string `json:"desc"` + + // DPAPI user + DPAPIUser string `json:"dpApiUser"` + + // External network ID + ExtNetID uint64 `json:"extnetId"` + + // List of frontends + Frontends ListFrontends `json:"frontends"` + + // Grid ID + GID uint64 `json:"gid"` + + // GUID + GUID uint64 `json:"guid"` + + // ID + ID uint64 `json:"id"` + + // Image ID + ImageID uint64 `json:"imageId"` + + // Milestones + Milestones uint64 `json:"milestones"` + + // Name + Name string `json:"name"` + + // Primary node + PrimaryNode RecordNode `json:"primaryNode"` + + // Resource group ID + RGID uint64 `json:"rgId"` + + // Resource group name + RGName string `json:"rgName"` + + // Secondary node + SecondaryNode RecordNode `json:"secondaryNode"` + + // Status + Status string `json:"status"` + + // Tech status + TechStatus string `json:"techStatus"` + + // Updated by + UpdatedBy string `json:"updatedBy"` + + // Updated time + UpdatedTime uint64 `json:"updatedTime"` + + // VINS ID + VINSID uint64 `json:"vinsId"` +} + +// List load balancers +type ListLB []ItemLB + +// Main information about affinity group +type ItemAffinityGroupCompute struct { + // Compute ID + ComputeID uint64 `json:"computeId"` + + // Other node + OtherNode []uint64 `json:"otherNode"` + + // Other node indirect + OtherNodeIndirect []uint64 `json:"otherNodeIndirect"` + + // Other node indirect soft + OtherNodeIndirectSoft []uint64 `json:"otherNodeIndirectSoft"` + + // Other node soft + OtherNodeSoft []uint64 `json:"otherNodeSoft"` + + // Same node + SameNode []uint64 `json:"sameNode"` + + // Same node soft + SameNodeSoft []uint64 `json:"sameNodeSoft"` +} + +// List of affinity groups +type ListAffinityGroupCompute []ItemAffinityGroupCompute diff --git a/internal/service/cloudapi/rg/quota_subresource.go b/internal/service/cloudapi/rg/quota_subresource.go index b201887..39edfcd 100644 --- a/internal/service/cloudapi/rg/quota_subresource.go +++ b/internal/service/cloudapi/rg/quota_subresource.go @@ -45,32 +45,33 @@ func makeQuotaRecord(arg_list []interface{}) QuotaRecord { ExtIPs: -1, GpuUnits: -1, } - subres_data := arg_list[0].(map[string]interface{}) + if len(arg_list) != 0 { + subres_data := arg_list[0].(map[string]interface{}) - if subres_data["cpu"].(int) > 0 { - quota.Cpu = subres_data["cpu"].(int) - } + if subres_data["cpu"].(int) > 0 { + quota.Cpu = subres_data["cpu"].(int) + } - if subres_data["disk"].(int) > 0 { - quota.Disk = subres_data["disk"].(int) // Disk capacity ib GB - } + if subres_data["disk"].(int) > 0 { + quota.Disk = subres_data["disk"].(int) // Disk capacity ib GB + } - if subres_data["ram"].(float64) > 0 { - quota.Ram = subres_data["ram"].(float64) // RAM volume in MB, as float64! - } + if subres_data["ram"].(float64) > 0 { + quota.Ram = subres_data["ram"].(float64) // RAM volume in MB, as float64! 
+ } - if subres_data["ext_traffic"].(int) > 0 { - quota.ExtTraffic = subres_data["ext_traffic"].(int) - } + if subres_data["ext_traffic"].(int) > 0 { + quota.ExtTraffic = subres_data["ext_traffic"].(int) + } - if subres_data["ext_ips"].(int) > 0 { - quota.ExtIPs = subres_data["ext_ips"].(int) - } + if subres_data["ext_ips"].(int) > 0 { + quota.ExtIPs = subres_data["ext_ips"].(int) + } - if subres_data["gpu_units"].(int) > 0 { - quota.GpuUnits = subres_data["gpu_units"].(int) + if subres_data["gpu_units"].(int) > 0 { + quota.GpuUnits = subres_data["gpu_units"].(int) + } } - return quota } diff --git a/internal/service/cloudapi/rg/resource_check_input_values.go b/internal/service/cloudapi/rg/resource_check_input_values.go new file mode 100644 index 0000000..0a42ad0 --- /dev/null +++ b/internal/service/cloudapi/rg/resource_check_input_values.go @@ -0,0 +1,111 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + + accountList := []struct { + ID int `json:"id"` + }{} + + accountListAPI := "/restmachine/cloudapi/account/list" + + accountListRaw, err := c.DecortAPICall(ctx, "POST", accountListAPI, urlValues) + if err != nil { + return false, err + } + + err = json.Unmarshal([]byte(accountListRaw), &accountList) + if err != nil { + return false, err + } + + haveAccount := false + + myAccount := d.Get("account_id").(int) + for _, account := range accountList { + if account.ID == myAccount { + haveAccount = true + break + } + } + return haveAccount, nil +} +func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + + locationList := []struct { + GID int `json:"gid"` + }{} + + locationsListAPI := "/restmachine/cloudapi/locations/list" + + locationListRaw, err := c.DecortAPICall(ctx, "POST", locationsListAPI, urlValues) + if err != nil { + return false, err + } + + err = json.Unmarshal([]byte(locationListRaw), &locationList) + if err != nil { + return false, err + } + + haveGID := false + + myGID := d.Get("gid").(int) + for _, location := range locationList { + if location.GID == myGID { + haveGID = true + break + } + } + + return haveGID, nil +} +func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + urlValues.Add("accountId", strconv.Itoa(d.Get("account_id").(int))) + + listExtNet := []struct { + ID int `json:"id"` + }{} + + extNetListAPI := "/restmachine/cloudapi/extnet/list" + + listExtNetRaw, err := c.DecortAPICall(ctx, "POST", extNetListAPI, urlValues) + if err != nil { + return false, err + } + + err = json.Unmarshal([]byte(listExtNetRaw), &listExtNet) + if err != nil { + return false, err + } + + haveExtNet := false + + myExtNetID := d.Get("ext_net_id").(int) + for _, extNet := range listExtNet { + if extNet.ID == myExtNetID { + haveExtNet = true + break + } + } + return haveExtNet, nil +} + diff --git a/internal/service/cloudapi/rg/resource_rg.go b/internal/service/cloudapi/rg/resource_rg.go index 5e159ed..87e515b 100644 --- a/internal/service/cloudapi/rg/resource_rg.go +++ b/internal/service/cloudapi/rg/resource_rg.go @@ -40,10 +40,13 @@ import ( 
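The three existence helpers introduced in resource_check_input_values.go above follow the same list-and-scan pattern: call a cloudapi list endpoint, unmarshal the response, and look for the requested ID. Purely as an illustration of that pattern (not part of the patch), a table-driven variant could look like the sketch below; existID, the jsonKey parameter and the generic map decoding are assumptions of this sketch.

package rg

import (
	"context"
	"encoding/json"
	"net/url"

	"github.com/rudecs/terraform-provider-decort/internal/controller"
)

// existID is a hypothetical generalisation of existAccountID/existGID/existExtNetID:
// it lists objects at listAPI and reports whether wanted appears under jsonKey.
func existID(ctx context.Context, m interface{}, listAPI, jsonKey string, wanted int, urlValues *url.Values) (bool, error) {
	c := m.(*controller.ControllerCfg)
	if urlValues == nil {
		urlValues = &url.Values{}
	}

	raw, err := c.DecortAPICall(ctx, "POST", listAPI, urlValues)
	if err != nil {
		return false, err
	}

	items := []map[string]interface{}{}
	if err := json.Unmarshal([]byte(raw), &items); err != nil {
		return false, err
	}

	for _, item := range items {
		// encoding/json decodes numbers into interface{} as float64.
		if v, ok := item[jsonKey].(float64); ok && int(v) == wanted {
			return true, nil
		}
	}
	return false, nil
}

Under that sketch, existAccountID would reduce to existID(ctx, m, "/restmachine/cloudapi/account/list", "id", d.Get("account_id").(int), nil); the patch itself keeps three separate functions.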
"github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/controller" + "github.com/rudecs/terraform-provider-decort/internal/dc" "github.com/rudecs/terraform-provider-decort/internal/location" + "github.com/rudecs/terraform-provider-decort/internal/status" log "github.com/sirupsen/logrus" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -54,11 +57,14 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter // Valid account ID is required to create new resource group // obtain Account ID by account name - it should not be zero on success - rg_name, arg_set := d.GetOk("name") - if !arg_set { + rgName, argSet := d.GetOk("name") + if !argSet { return diag.FromErr(fmt.Errorf("Cannot create new RG: missing name.")) } + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + /* Current version of provider works with default grid id (same is true for disk resources) grid_id, arg_set := d.GetOk("grid_id") if !arg_set { @@ -72,99 +78,289 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter // all required parameters are set in the schema - we can continue with RG creation log.Debugf("resourceResgroupCreate: called for RG name %s, account ID %d", - rg_name.(string), d.Get("account_id").(int)) + rgName.(string), d.Get("account_id").(int)) + + // Check input values + // AccountID + haveAccount, err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveAccount { + return diag.Errorf("resourceResgroupCreate: can't create RG bacause AccountID %d not allowed or does not exist", d.Get("account_id").(int)) + } + // GID + haveGID, err := existGID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceResgroupCreate: can't create RG bacause GID %d not allowed or does not exist", d.Get("gid").(int)) + } + // ExtNetID + if _, ok := d.GetOk("ext_net_id"); ok { + haveExtNet, err := existExtNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveExtNet { + return diag.Errorf("resourceResgroupCreate: can't create RG bacause ExtNetID %d not allowed or does not exist", d.Get("ext_net_id").(int)) + } + } // quota settings are optional - set_quota := false - var quota_record QuotaRecord - arg_value, arg_set := d.GetOk("quota") - if arg_set { + setQuota := false + var quotaRecord QuotaRecord + argValue, argSet := d.GetOk("quota") + if argSet { log.Debugf("resourceResgroupCreate: setting Quota on RG requested") - quota_record = makeQuotaRecord(arg_value.([]interface{})) - set_quota = true + quotaRecord = makeQuotaRecord(argValue.([]interface{})) + setQuota = true } - c := m.(*controller.ControllerCfg) log.Debugf("resourceResgroupCreate: called by user %q for RG name %s, account ID %d", c.GetDecortUsername(), - rg_name.(string), d.Get("account_id").(int)) + rgName.(string), d.Get("account_id").(int)) - url_values := &url.Values{} - url_values.Add("accountId", fmt.Sprintf("%d", d.Get("account_id").(int))) - url_values.Add("name", rg_name.(string)) - url_values.Add("gid", fmt.Sprintf("%d", location.DefaultGridID)) // use default Grid ID, similar to disk resource mgmt convention - url_values.Add("owner", c.GetDecortUsername()) + urlValues = &url.Values{} + urlValues.Add("accountId", 
strconv.Itoa(d.Get("account_id").(int))) + urlValues.Add("name", rgName.(string)) + urlValues.Add("gid", strconv.Itoa(location.DefaultGridID)) // use default Grid ID, similar to disk resource mgmt convention + urlValues.Add("owner", c.GetDecortUsername()) // pass quota values as set - if set_quota { - url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quota_record.Cpu)) - url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quota_record.Disk)) - url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quota_record.Ram)) // RAM quota is float; this may change in the future - url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quota_record.ExtTraffic)) - url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quota_record.ExtIPs)) - // url_values.Add("???", fmt.Sprintf("%d", quota_record.GpuUnits)) + if setQuota { + urlValues.Add("maxCPUCapacity", strconv.Itoa(quotaRecord.Cpu)) + urlValues.Add("maxVDiskCapacity", strconv.Itoa(quotaRecord.Disk)) + urlValues.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotaRecord.Ram)) // RAM quota is float; this may change in the future + urlValues.Add("maxNetworkPeerTransfer", strconv.Itoa(quotaRecord.ExtTraffic)) + urlValues.Add("maxNumPublicIP", strconv.Itoa(quotaRecord.ExtIPs)) } // parse and handle network settings - def_net_type, arg_set := d.GetOk("def_net_type") - if arg_set { - url_values.Add("def_net", def_net_type.(string)) // NOTE: in API default network type is set by "def_net" parameter + defNetType, argSet := d.GetOk("def_net_type") + if argSet { + urlValues.Add("def_net", defNetType.(string)) // NOTE: in API default network type is set by "def_net" parameter + } else { + d.Set("def_net_type", "PRIVATE") } - ipcidr, arg_set := d.GetOk("ipcidr") - if arg_set { - url_values.Add("ipcidr", ipcidr.(string)) + ipcidr, argSet := d.GetOk("ipcidr") + if argSet { + urlValues.Add("ipcidr", ipcidr.(string)) } - ext_net_id, arg_set := d.GetOk("ext_net_id") - if arg_set { - url_values.Add("extNetId", fmt.Sprintf("%d", ext_net_id.(int))) + description, argSet := d.GetOk("description") + if argSet { + urlValues.Add("desc", description.(string)) } - ext_ip, arg_set := d.GetOk("ext_ip") - if arg_set { - url_values.Add("extIp", ext_ip.(string)) + reason, argSet := d.GetOk("reason") + if argSet { + urlValues.Add("reason", reason.(string)) } - api_resp, err := c.DecortAPICall(ctx, "POST", ResgroupCreateAPI, url_values) + extNetId, argSet := d.GetOk("ext_net_id") + if argSet { + urlValues.Add("extNetId", strconv.Itoa(extNetId.(int))) + } + + extIp, argSet := d.GetOk("ext_ip") + if argSet { + urlValues.Add("extIp", extIp.(string)) + } + + apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupCreateAPI, urlValues) if err != nil { return diag.FromErr(err) } + d.SetId(apiResp) // rg/create API returns ID of the newly creted resource group on success + + w := dc.Warnings{} + if access, ok := d.GetOk("access"); ok { + urlValues = &url.Values{} + var user, right string + + if access.(*schema.Set).Len() > 0 { + accessList := access.(*schema.Set).List() + for _, accessInterface := range accessList { + access := accessInterface.(map[string]interface{}) + user = access["user"].(string) + right = access["right"].(string) + + urlValues.Add("rgId", d.Id()) + urlValues.Add("user", user) + urlValues.Add("right", right) + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", RgAccessGrantAPI, urlValues) + if err != nil { + w.Add(err) + } + } + } - d.SetId(api_resp) // rg/create API returns ID of the newly 
creted resource group on success - // rg.ID, _ = strconv.Atoi(api_resp) - if !set_quota { - rg, err := utilityResgroupCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) + } + + if defNet, ok := d.GetOk("def_net"); ok { + urlValues := &url.Values{} + + if defNet.(*schema.Set).Len() > 0 { + defNetList := defNet.(*schema.Set).List() + defNetItem := defNetList[0].(map[string]interface{}) + + netType := defNetItem["net_type"].(string) + + urlValues.Add("rgId", d.Id()) + urlValues.Add("netType", netType) + + if netID, ok := defNetItem["net_id"]; ok { + urlValues.Add("netId", strconv.Itoa(netID.(int))) + } + if reason, ok := defNetItem["reason"]; ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", RgSetDefNetAPI, urlValues) + if err != nil { + w.Add(err) + } + d.Set("def_net_type", netType) } - d.Set("quota", parseQuota(rg.Quota)) + } + + if enable, ok := d.GetOk("enable"); ok { + urlValues = &url.Values{} + + api := RgDisableAPI + enable := enable.(bool) + if enable { + api = RgEnableAPI + } + urlValues.Add("rgId", d.Id()) + + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", api, urlValues) + if err != nil { + w.Add(err) + } } // re-read newly created RG to make sure schema contains complete and up to date set of specifications - return resourceResgroupRead(ctx, d, m) + defer resourceResgroupRead(ctx, d, m) + return w.Get() } func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupRead: called for RG name %s, account ID %d", d.Get("name").(string), d.Get("account_id").(int)) - rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) + c := m.(*controller.ControllerCfg) + + rgFacts, err := utilityResgroupCheckPresence(ctx, d, m) if err != nil { - // if empty string is returned from utilityResgroupCheckPresence then there is no - // such resource group and err tells so - just return it to the calling party d.SetId("") // ensure ID is empty return diag.FromErr(err) } - return diag.FromErr(flattenResgroup(d, *rg_facts)) + switch rgFacts.Status { + case status.Modeled: + case status.Created: + case status.Enabled: + case status.Deleted: + urlValues := &url.Values{} + urlValues.Add("rgId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", RgRestoreAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + case status.Deleting: + case status.Destroyed: + d.SetId("") + return resourceResgroupCreate(ctx, d, m) + case status.Destroying: + case status.Disabled: + case status.Disabling: + case status.Enabled: + case status.Enabling: + } + + rgFacts, err = utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") // ensure ID is empty + return diag.FromErr(err) + } + return diag.FromErr(flattenResgroup(d, *rgFacts)) } func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d", d.Get("name").(string), d.Get("account_id").(int)) + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + // Check input values + // AccountID + haveAccount, err := existAccountID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveAccount { + return diag.Errorf("resourceResgroupUpdate: can't create RG bacause AccountID %d not allowed or does not exist", d.Get("account_id").(int)) + } + // GID + haveGID, err := existGID(ctx, d, m) + if err != nil 
{ + return diag.FromErr(err) + } + if !haveGID { + return diag.Errorf("resourceResgroupUpdate: can't create RG bacause GID %d not allowed or does not exist", d.Get("gid").(int)) + } + // ExtNetID + if _, ok := d.GetOk("ext_net_id"); ok { + haveExtNet, err := existExtNetID(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + if !haveExtNet { + return diag.Errorf("resourceResgroupUpdate: can't create RG bacause ExtNetID %d not allowed or does not exist", d.Get("ext_net_id").(int)) + } + } + + rgFacts, err := utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") // ensure ID is empty + return diag.FromErr(err) + } + + switch rgFacts.Status { + case status.Modeled: + case status.Created: + case status.Enabled: + case status.Deleted: + urlValues := &url.Values{} + urlValues.Add("rgId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", RgRestoreAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + case status.Deleting: + case status.Destroyed: + d.SetId("") + return resourceResgroupCreate(ctx, d, m) + case status.Destroying: + case status.Disabled: + case status.Disabling: + case status.Enabled: + case status.Enabling: + } /* NOTE: we do not allow changing the following attributes of an existing RG via terraform: - def_net_type - ipcidr @@ -173,6 +369,13 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter The following code fragment checks if any of these have been changed and generates error. */ + if ok := d.HasChange("def_net"); ok { + _, newDefNet := d.GetChange("def_net") + if newDefNet.(*schema.Set).Len() == 0 { + return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty") + } + } + for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} { attr_new, attr_old := d.GetChange("def_net_type") if attr_new.(string) != attr_old.(string) { @@ -180,78 +383,85 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter } } - attr_new, attr_old := d.GetChange("ext_net_id") - if attr_new.(int) != attr_old.(int) { + attrNew, attrOld := d.GetChange("ext_net_id") + if attrNew.(int) != attrOld.(int) { return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id())) } - do_general_update := false // will be true if general RG update is necessary (API rg/update) + doGeneralUpdate := false // will be true if general RG update is necessary (API rg/update) - c := m.(*controller.ControllerCfg) - url_values := &url.Values{} - url_values.Add("rgId", d.Id()) + urlValues = &url.Values{} + urlValues.Add("rgId", d.Id()) - name_new, name_set := d.GetOk("name") - if name_set { + nameNew, nameSet := d.GetOk("name") + if nameSet { log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.") - name_old, _ := d.GetChange("name") - if name_old.(string) != name_new.(string) { - do_general_update = true - url_values.Add("name", name_new.(string)) + nameOld, _ := d.GetChange("name") + if nameOld.(string) != nameNew.(string) { + doGeneralUpdate = true + urlValues.Add("name", nameNew.(string)) } } - quota_value, quota_set := d.GetOk("quota") - if quota_set { + quotaValue, quotaSet := d.GetOk("quota") + if quotaSet { log.Debugf("resourceResgroupUpdate: quota specified - looking for deltas from the old quota.") - quotarecord_new := makeQuotaRecord(quota_value.([]interface{})) - quota_value_old, _ := d.GetChange("quota") // returns old as 1st, new as 2nd return value - quotarecord_old := 
makeQuotaRecord(quota_value_old.([]interface{})) - - if quotarecord_new.Cpu != quotarecord_old.Cpu { - do_general_update = true - log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu) - url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quotarecord_new.Cpu)) + quotarecordNew := makeQuotaRecord(quotaValue.([]interface{})) + quotaValueOld, _ := d.GetChange("quota") // returns old as 1st, new as 2nd return value + quotarecordOld := makeQuotaRecord(quotaValueOld.([]interface{})) + log.Debug(quotaValueOld, quotarecordNew) + + if quotarecordNew.Cpu != quotarecordOld.Cpu { + doGeneralUpdate = true + log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecordNew.Cpu, quotarecordOld.Cpu) + urlValues.Add("maxCPUCapacity", strconv.Itoa(quotarecordNew.Cpu)) } - if quotarecord_new.Disk != quotarecord_old.Disk { - do_general_update = true - log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk) - url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quotarecord_new.Disk)) + if quotarecordNew.Disk != quotarecordOld.Disk { + doGeneralUpdate = true + log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecordNew.Disk, quotarecordOld.Disk) + urlValues.Add("maxVDiskCapacity", strconv.Itoa(quotarecordNew.Disk)) } - if quotarecord_new.Ram != quotarecord_old.Ram { // NB: quota on RAM is stored as float32, in units of MB - do_general_update = true - log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram) - url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecord_new.Ram)) + if quotarecordNew.Ram != quotarecordOld.Ram { // NB: quota on RAM is stored as float32, in units of MB + doGeneralUpdate = true + log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecordNew.Ram, quotarecordOld.Ram) + urlValues.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecordNew.Ram)) } - if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic { - do_general_update = true - log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic) - url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quotarecord_new.ExtTraffic)) + if quotarecordNew.ExtTraffic != quotarecordOld.ExtTraffic { + doGeneralUpdate = true + log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecordNew.ExtTraffic, quotarecordOld.ExtTraffic) + urlValues.Add("maxNetworkPeerTransfer", strconv.Itoa(quotarecordNew.ExtTraffic)) } - if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs { - do_general_update = true - log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs) - url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quotarecord_new.ExtIPs)) + if quotarecordNew.ExtIPs != quotarecordOld.ExtIPs { + doGeneralUpdate = true + log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecordNew.ExtIPs, quotarecordOld.ExtIPs) + urlValues.Add("maxNumPublicIP", strconv.Itoa(quotarecordNew.ExtIPs)) } + } else { + doGeneralUpdate = true + urlValues.Add("maxCPUCapacity", "-1") + urlValues.Add("maxVDiskCapacity", "-1") + urlValues.Add("maxMemoryCapacity", "-1") + urlValues.Add("maxNetworkPeerTransfer", "-1") + urlValues.Add("maxNumPublicIP", "-1") } - desc_new, desc_set := d.GetOk("description") - if desc_set { + descNew, descSet := d.GetOk("description") + if descSet { log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.") - desc_old, _ := 
d.GetChange("description") - if desc_old.(string) != desc_new.(string) { - do_general_update = true - url_values.Add("desc", desc_new.(string)) + descOld, _ := d.GetChange("description") + if descOld.(string) != descNew.(string) { + doGeneralUpdate = true + urlValues.Add("desc", descNew.(string)) } } - if do_general_update { + if doGeneralUpdate { log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG") - _, err := c.DecortAPICall(ctx, "POST", ResgroupUpdateAPI, url_values) + _, err := c.DecortAPICall(ctx, "POST", ResgroupUpdateAPI, urlValues) if err != nil { return diag.FromErr(err) } @@ -259,6 +469,94 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter log.Debugf("resourceResgroupUpdate: no difference between old and new state - no update on the RG will be done") } + urlValues = &url.Values{} + enableOld, enableNew := d.GetChange("enable") + if enableOld.(bool) && !enableNew.(bool) { + urlValues.Add("rgId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", RgDisableAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } else if !enableOld.(bool) && enableNew.(bool) { + urlValues.Add("rgId", d.Id()) + _, err := c.DecortAPICall(ctx, "POST", RgEnableAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + + urlValues = &url.Values{} + + oldSet, newSet := d.GetChange("access") + + deletedAccess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + for _, deletedInterface := range deletedAccess { + deletedItem := deletedInterface.(map[string]interface{}) + + user := deletedItem["user"].(string) + + urlValues.Add("rgId", d.Id()) + urlValues.Add("user", user) + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", RgAccessRevokeAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + + addedAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + for _, addedInterface := range addedAccess { + addedItem := addedInterface.(map[string]interface{}) + + user := addedItem["user"].(string) + right := addedItem["right"].(string) + + urlValues.Add("rgId", d.Id()) + urlValues.Add("user", user) + urlValues.Add("right", right) + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", RgAccessGrantAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + + if ok := d.HasChange("def_net"); ok { + oldDefNet, newDefNet := d.GetChange("def_net") + if newDefNet.(*schema.Set).Len() > 0 { + changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List() + for _, changedDefNetInterface := range changedDefNet { + + defNetItem := changedDefNetInterface.(map[string]interface{}) + + netType := defNetItem["net_type"].(string) + + urlValues.Add("rgId", d.Id()) + urlValues.Add("netType", netType) + + if netID, ok := defNetItem["net_id"]; ok { + urlValues.Add("netId", strconv.Itoa(netID.(int))) + } + if reason, ok := defNetItem["reason"]; ok { + urlValues.Add("reason", reason.(string)) + } + + _, err := c.DecortAPICall(ctx, "POST", RgSetDefNetAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + } + } + } + return resourceResgroupRead(ctx, d, m) } @@ -269,20 +567,20 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter d.Get("name").(string), d.Get("account_id").(int)) c := 
m.(*controller.ControllerCfg) - url_values := &url.Values{} + urlValues := &url.Values{} - url_values.Add("rgId", d.Id()) + urlValues.Add("rgId", d.Id()) if force, ok := d.GetOk("force"); ok { - url_values.Add("force", strconv.FormatBool(force.(bool))) + urlValues.Add("force", strconv.FormatBool(force.(bool))) } if permanently, ok := d.GetOk("permanently"); ok { - url_values.Add("permanently", strconv.FormatBool(permanently.(bool))) + urlValues.Add("permanently", strconv.FormatBool(permanently.(bool))) } if reason, ok := d.GetOk("reason"); ok { - url_values.Add("reason", reason.(string)) + urlValues.Add("reason", reason.(string)) } - _, err := c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, url_values) + _, err := c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, urlValues) if err != nil { return diag.FromErr(err) } @@ -315,7 +613,7 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { "def_net_type": { Type: schema.TypeString, Optional: true, - Default: "PRIVATE", + Computed: true, ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.", }, @@ -348,7 +646,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { "quota": { Type: schema.TypeList, Optional: true, - Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: quotaRgSubresourceSchemaMake(), @@ -356,9 +653,61 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { Description: "Quota settings for this resource group.", }, + "access": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "user": { + Type: schema.TypeString, + Required: true, + Description: "User or group name to grant access", + }, + "right": { + Type: schema.TypeString, + Required: true, + Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "Reason for action", + }, + }, + }, + }, + + "def_net": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "net_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC"}, false), + Description: "Network type to set. Must be on of 'PRIVATE' or 'PUBLIC'.", + }, + "net_id": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. 
Otherwise, netId identifies either existing external network segment or ViNS.", + }, + "reason": { + Type: schema.TypeString, + Optional: true, + Description: "Reason for action", + }, + }, + }, + }, + "description": { Type: schema.TypeString, Optional: true, + Computed: true, Description: "User-defined text description of this resource group.", }, "force": { @@ -378,140 +727,89 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { Optional: true, Description: "Set to True if you want force delete non-empty RG", }, + "register_computes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Register computes in registration system", + }, + + "enable": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "flag for enable/disable RG", + }, "account_name": { Type: schema.TypeString, Computed: true, Description: "Name of the account, which this resource group belongs to.", }, - "resources": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "current": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, + Schema: resourcesSchemaMake(), }, }, + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: aclSchemaMake(), + }, + }, + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + }, + "deleted_by": { + Type: schema.TypeString, + Computed: true, + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + }, + "dirty": { + Type: schema.TypeBool, + Computed: true, + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + }, + "lock_status": { + Type: schema.TypeString, + Computed: true, + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + }, + "secret": { + 
Type: schema.TypeString, + Computed: true, + }, "status": { Type: schema.TypeString, Computed: true, Description: "Current status of this resource group.", }, - + "updated_by": { + Type: schema.TypeString, + Computed: true, + }, + "updated_time": { + Type: schema.TypeInt, + Computed: true, + }, "vins": { Type: schema.TypeList, //this is a list of ints Computed: true, @@ -530,13 +828,19 @@ func ResourceRgSchemaMake() map[string]*schema.Schema { Description: "List of computes deployed in this resource group.", }, - "computes": { - Type: schema.TypeList, //this is a list of ints + "res_types": { + Type: schema.TypeList, Computed: true, Elem: &schema.Schema{ - Type: schema.TypeInt, + Type: schema.TypeString, + }, + }, + "uniq_pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, - Description: "List of computes deployed in this resource group.", }, } } @@ -563,5 +867,23 @@ func ResourceResgroup() *schema.Resource { }, Schema: ResourceRgSchemaMake(), + CustomizeDiff: customdiff.All( + customdiff.IfValueChange("def_net", + func(ctx context.Context, oldValue, newValue, meta interface{}) bool { + return true + }, + func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error { + oldValue, newValue := d.GetChange("def_net") + + old := len(oldValue.(*schema.Set).List()) + new_ := len(newValue.(*schema.Set).List()) + + if old == 1 && new_ == 0 { + return fmt.Errorf("CustomizeDiff: block def_net must not be empty") + } + return nil + }, + ), + ), } } diff --git a/internal/service/cloudapi/rg/utility_rg.go b/internal/service/cloudapi/rg/utility_rg.go index cd1be28..f6eeb1c 100644 --- a/internal/service/cloudapi/rg/utility_rg.go +++ b/internal/service/cloudapi/rg/utility_rg.go @@ -45,7 +45,7 @@ import ( // On success this function returns a string, as returned by API rg/get, which could be unmarshalled // into ResgroupGetResp structure -func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) { +func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*RecordResourceGroup, error) { // This function tries to locate resource group by one of the following algorithms depending // on the parameters passed: // - if resource group ID is specified -> by RG ID @@ -71,8 +71,34 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m } else { urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) } + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + rgData := &RecordResourceGroup{} + rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(rgRaw), rgData) + if err != nil { + return nil, err + } + return rgData, nil +} + +func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*RecordResourceGroup, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + rgData := &RecordResourceGroup{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } - rgData := &ResgroupGetResp{} rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues) if err != nil { return nil, err diff --git a/internal/service/cloudapi/rg/utility_rg_affinity_group_computes.go b/internal/service/cloudapi/rg/utility_rg_affinity_group_computes.go new file mode 100644 
index 0000000..fa9a787 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_affinity_group_computes.go @@ -0,0 +1,32 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupComputesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAffinityGroupCompute, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + listGroupComputes := ListAffinityGroupCompute{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + urlValues.Add("affinityGroup", d.Get("affinity_group").(string)) + + listGroupComputesRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupComputesAPI, urlValues) + if err != nil { + return nil, err + } + err = json.Unmarshal([]byte(listGroupComputesRaw), &listGroupComputes) + if err != nil { + return nil, err + } + + return listGroupComputes, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_affinity_groups_get.go b/internal/service/cloudapi/rg/utility_rg_affinity_groups_get.go new file mode 100644 index 0000000..94d0ec3 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_affinity_groups_get.go @@ -0,0 +1,33 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupsGetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]uint64, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + computes := make([]uint64, 0) + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + urlValues.Add("affinityGroup", d.Get("affinity_group").(string)) + + computesRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupsGetAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(computesRaw), &computes) + if err != nil { + return nil, err + } + + return computes, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_affinity_groups_list.go b/internal/service/cloudapi/rg/utility_rg_affinity_groups_list.go new file mode 100644 index 0000000..3c0be2b --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_affinity_groups_list.go @@ -0,0 +1,32 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupsListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (map[string][]uint64, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + groups := make(map[string][]uint64, 0) + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + groupsRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupsListAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(groupsRaw), &groups) + if err != nil { + return nil, err + } + + return groups, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_audits.go b/internal/service/cloudapi/rg/utility_rg_audits.go new file mode 100644 index 0000000..7c0da97 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_audits.go @@ -0,0 +1,30 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAudits, error) { + c := m.(*controller.ControllerCfg) + + urlValues := &url.Values{} + rgAudits := ListAudits{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + rgAuditsRow, err := c.DecortAPICall(ctx, "POST", RgAuditsAPI, urlValues) + if err != nil { + return nil, err + } + err = json.Unmarshal([]byte(rgAuditsRow), &rgAudits) + if err != nil { + return nil, err + } + + return rgAudits, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_list.go b/internal/service/cloudapi/rg/utility_rg_list.go index 8b88880..cdaa870 100644 --- a/internal/service/cloudapi/rg/utility_rg_list.go +++ b/internal/service/cloudapi/rg/utility_rg_list.go @@ -44,11 +44,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ResgroupListResp, error) { +func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListResourceGroups, error) { c := m.(*controller.ControllerCfg) urlValues := &url.Values{} - rgList := ResgroupListResp{} + rgList := ListResourceGroups{} if size, ok := d.GetOk("size"); ok { urlValues.Add("size", strconv.Itoa(size.(int))) diff --git a/internal/service/cloudapi/rg/utility_rg_list_computes.go b/internal/service/cloudapi/rg/utility_rg_list_computes.go new file mode 100644 index 0000000..bb87e81 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_list_computes.go @@ -0,0 +1,35 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgListComputesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListComputes, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + listComputes := ListComputes{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + listComputesRaw, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(listComputesRaw), &listComputes) + if err != nil { + return nil, err + } + + return listComputes, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_list_deleted.go b/internal/service/cloudapi/rg/utility_rg_list_deleted.go new file mode 100644 index 0000000..c0e691d --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_list_deleted.go @@ -0,0 +1,37 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListResourceGroups, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + rgList := ListResourceGroups{} + + if size, ok := d.GetOk("size"); ok { + urlValues.Add("size", strconv.Itoa(size.(int))) + } + if page, ok := d.GetOk("page"); ok { + urlValues.Add("page", strconv.Itoa(page.(int))) + } + + rgListRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListDeletedAPI, urlValues) + 
if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(rgListRaw), &rgList) + if err != nil { + return nil, err + } + + return rgList, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_list_lb.go b/internal/service/cloudapi/rg/utility_rg_list_lb.go new file mode 100644 index 0000000..3f5660b --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_list_lb.go @@ -0,0 +1,32 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgListLbCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListLB, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + listLb := ListLB{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + listLbRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListLbAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(listLbRaw), &listLb) + if err != nil { + return nil, err + } + + return listLb, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_list_pfw.go b/internal/service/cloudapi/rg/utility_rg_list_pfw.go new file mode 100644 index 0000000..df95e37 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_list_pfw.go @@ -0,0 +1,32 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgListPfwCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListPFW, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + listPfw := ListPFW{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + listPfwRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListPfwAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(listPfwRaw), &listPfw) + if err != nil { + return nil, err + } + + return listPfw, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_list_vins.go b/internal/service/cloudapi/rg/utility_rg_list_vins.go new file mode 100644 index 0000000..17194f6 --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_list_vins.go @@ -0,0 +1,36 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityRgListVinsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListVINS, error) { + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + + listVins := ListVINS{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + if val, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", val.(string)) + } + + listVinsRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListVinsAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(listVinsRaw), &listVins) + if err != nil { + return nil, err + } + + return listVins, nil +} diff --git a/internal/service/cloudapi/rg/utility_rg_usage.go b/internal/service/cloudapi/rg/utility_rg_usage.go new file mode 100644 index 0000000..cd27d5b --- /dev/null +++ b/internal/service/cloudapi/rg/utility_rg_usage.go @@ -0,0 +1,35 @@ +package rg + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + 
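The utility_rg_list_* helpers above all return plain Go slices (ListComputes, ListLB, ListPFW, ListVINS), and each corresponding data source only needs to flatten the slice into its schema. A rough sketch of such a Read function is given below; the schema keys ("items", "vins_id", "vins_name") and the use of rg_id as the data-source ID are assumptions of this sketch, and the real dataSourceRgListVinsRead and flatten helpers in this patch may differ.

package rg

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// dataSourceRgListVinsReadSketch shows one way to consume
// utilityRgListVinsCheckPresence from a data-source Read callback.
func dataSourceRgListVinsReadSketch(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listVins, err := utilityRgListVinsCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	// Flatten only a couple of ItemVINS fields for brevity.
	items := make([]map[string]interface{}, 0, len(listVins))
	for _, v := range listVins {
		items = append(items, map[string]interface{}{
			"vins_id":   v.ID,
			"vins_name": v.Name,
		})
	}

	d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
	if err := d.Set("items", items); err != nil {
		return diag.FromErr(err)
	}
	return nil
}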
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" +) + +func utilityDataRgUsageCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*Resource, error) { + c := m.(*controller.ControllerCfg) + urlValues := url.Values{} + usage := Resource{} + + urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int))) + + if reason, ok := d.GetOk("reason"); ok { + urlValues.Add("reason", reason.(string)) + } + + usageRaw, err := c.DecortAPICall(ctx, "POST", ResgroupUsageAPI, &urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(usageRaw), &usage) + if err != nil { + return nil, err + } + + return &usage, nil +} diff --git a/internal/service/cloudapi/vins/resource_vins.go b/internal/service/cloudapi/vins/resource_vins.go index 5879e03..966dd9b 100644 --- a/internal/service/cloudapi/vins/resource_vins.go +++ b/internal/service/cloudapi/vins/resource_vins.go @@ -450,7 +450,6 @@ func resourceVinsDelete(ctx context.Context, d *schema.ResourceData, m interface if err != nil { return diag.FromErr(err) } - return nil } diff --git a/samples/cloudapi/data_kvmvm/main.tf b/samples/cloudapi/data_kvmvm/main.tf index 9f080d9..260616b 100644 --- a/samples/cloudapi/data_kvmvm/main.tf +++ b/samples/cloudapi/data_kvmvm/main.tf @@ -26,27 +26,12 @@ provider "decort" { } data "decort_kvmvm" "comp" { - #Для получения информации о виртуальной машине - #можно воспользоваться двумя методами: - #1. получение информации по идентификатору машины - compute_id - #2. получение информации по имени машины и идентификатору ресурсной группы - name и rg_id + #получение информации по идентификатору машины - compute_id #id виртуальной машины - #опциональный параметр + #обязательный параметр #тип - число #compute_id = 11346 - - #название машины - #опциональный параметр - #тип - строка - #используется вместе с параметром rg_id - #name = "test-rg-temp" - - #id ресурсной группы - #опциональный параметр - #тип - число - #используется вместе с параметром name - #rg_id = 1825 } output "test" { diff --git a/samples/cloudapi/data_kvmvm_audits/main.tf b/samples/cloudapi/data_kvmvm_audits/main.tf new file mode 100644 index 0000000..1ec62a1 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_audits/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение данных об аудитах compute (виртулаьной машине) +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_audits" "kvmvm_audits" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10154 +} + +output "output" { + value = data.decort_kvmvm_audits.kvmvm_audits +} diff --git a/samples/cloudapi/data_kvmvm_get_audits/main.tf b/samples/cloudapi/data_kvmvm_get_audits/main.tf new file mode 100644 index 0000000..e2e9d08 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_get_audits/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение данных об аудитах compute (виртулаьной машине) +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы 
работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_get_audits" "kvmvm_get_audits" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10154 +} + +output "output" { + value = data.decort_kvmvm_get_audits.kvmvm_get_audits +} diff --git a/samples/cloudapi/data_kvmvm_get_console_url/main.tf b/samples/cloudapi/data_kvmvm_get_console_url/main.tf new file mode 100644 index 0000000..e7db2d4 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_get_console_url/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение url compute (виртулаьной машины) +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_get_console_url" "kvmvm_get_console_url" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10154 +} + +output "output" { + value = data.decort_kvmvm_get_console_url.kvmvm_get_console_url +} diff --git a/samples/cloudapi/data_kvmvm_get_log/main.tf b/samples/cloudapi/data_kvmvm_get_log/main.tf new file mode 100644 index 0000000..4b9bce0 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_get_log/main.tf @@ -0,0 +1,41 @@ +/* +Пример использования +Получение логов compute (виртулаьной машины) +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_get_log" "kvmvm_get_log" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10154 + + #путь до log файла + #обязательный параметр + #тип - строка + path = "/var/log/file.log" +} + +output "output" { + value = data.decort_kvmvm_get_log.kvmvm_get_log +} diff --git a/samples/cloudapi/data_kvmvm_list/main.tf b/samples/cloudapi/data_kvmvm_list/main.tf new file mode 100644 index 0000000..38ca7d1 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_list/main.tf @@ -0,0 +1,50 @@ +/* +Пример использования +Получение данных об списке compute (виртулаьных машин) +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + 
#controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_list" "compute_list" { + #флаг включения в результат удаленных виртуальных машин + #опциональный параметр + #тип - булев + #значение по умолчанию - false + #если не задан - выводятся все доступные неудаленные виртуальные машины + includedeleted = true + + #номер страницы для отображения + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + page = 1 + + #размер страницы + #опциональный параметр + #тип - число + #если не задан - выводятся все доступные данные + size = 1 +} + +output "output" { + value = data.decort_kvmvm_list.compute_list +} diff --git a/samples/cloudapi/data_kvmvm_pfw_list/main.tf b/samples/cloudapi/data_kvmvm_pfw_list/main.tf new file mode 100644 index 0000000..648eac6 --- /dev/null +++ b/samples/cloudapi/data_kvmvm_pfw_list/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение данных о списке правил port forwarding compute (виртуальной машины) +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_pfw_list" "kvmvm_pfw_list" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10524 +} + +output "output" { + value = data.decort_kvmvm_pfw_list.kvmvm_pfw_list +} diff --git a/samples/cloudapi/data_kvmvm_user_list/main.tf b/samples/cloudapi/data_kvmvm_user_list/main.tf new file mode 100644 index 0000000..7bd33db --- /dev/null +++ b/samples/cloudapi/data_kvmvm_user_list/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение данных о пользователях compute (виртуальной машины) +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_kvmvm_user_list" "kvmvm_user_list" { + #id виртуальной машины + #обязательный параметр + #тип - число + compute_id = 10154 +} + +output "output" { + value = data.decort_kvmvm_user_list.kvmvm_user_list +} diff --git a/samples/cloudapi/data_rg/main.tf b/samples/cloudapi/data_rg/main.tf new file mode 100644 index 0000000..3063ce7 --- /dev/null +++ b/samples/cloudapi/data_rg/main.tf @@ -0,0 +1,42 @@ +/* +Пример использования +Получение информации о ресурсной группе RG +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + 
#controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_resgroup" "rg" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 1535 + #причина для выполняния действия + #необязятельный параметр + #тип - строка + reason = "TEST" +} + +output "output" { + value = data.decort_resgroup.rg +} diff --git a/samples/cloudapi/data_rg_affinity_group_computes/main.tf b/samples/cloudapi/data_rg_affinity_group_computes/main.tf new file mode 100644 index 0000000..1b7de29 --- /dev/null +++ b/samples/cloudapi/data_rg_affinity_group_computes/main.tf @@ -0,0 +1,40 @@ +/* +Пример использования +Получение информации о специальной группе компьютов +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_affinity_group_computes" "lc" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 + #название специальной группы компьютов + #обязательное поле + #тип - строка + affinity_group = "TEST" +} + +output "output" { + value = data.decort_rg_affinity_group_computes.lc +} diff --git a/samples/cloudapi/data_rg_affinity_groups_get/main.tf b/samples/cloudapi/data_rg_affinity_groups_get/main.tf new file mode 100644 index 0000000..9aab26a --- /dev/null +++ b/samples/cloudapi/data_rg_affinity_groups_get/main.tf @@ -0,0 +1,41 @@ +/* +Пример использования +Получение информации о списке компьютов из определенной группы +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_affinity_groups_get" "get_groups" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 + #название специальной группы компьютов + #обязательное поле + #тип - строка + affinity_group = "TEST" +} + +output "output" { + value = data.decort_rg_affinity_groups_get.get_groups +} diff --git a/samples/cloudapi/data_rg_affinity_groups_list/main.tf b/samples/cloudapi/data_rg_affinity_groups_list/main.tf new file mode 100644 index 0000000..e69fb5b --- /dev/null +++ b/samples/cloudapi/data_rg_affinity_groups_list/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение информации о списке специальных групп компьютов +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + 
controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_affinity_groups_list" "list_groups" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 +} + +output "output" { + value = data.decort_rg_affinity_groups_list.list_groups +} diff --git a/samples/cloudapi/data_rg_audits/main.tf b/samples/cloudapi/data_rg_audits/main.tf new file mode 100644 index 0000000..41ba36d --- /dev/null +++ b/samples/cloudapi/data_rg_audits/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение информации о списке аудитов ресурсной группы +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_audits" "rg_audits" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 +} + +output "output" { + value = data.decort_rg_audits.rg_audits +} diff --git a/samples/cloudapi/data_rg_list/main.tf b/samples/cloudapi/data_rg_list/main.tf index 4b92e76..99b6f45 100644 --- a/samples/cloudapi/data_rg_list/main.tf +++ b/samples/cloudapi/data_rg_list/main.tf @@ -1,21 +1,20 @@ /* Пример использования -Получение списка всех resource groups - +Получение информации о списке всех ресурсных группах к которым есть доступ */ #Расскомментируйте этот код, #и внесите необходимые правки в версию и путь, #чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером -/* -terraform { + +/* terraform { required_providers { decort = { version = "1.1" source = "digitalenergy.online/decort/decort" } } -} -*/ +} */ + provider "decort" { authenticator = "oauth2" #controller_url = @@ -25,25 +24,23 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_rg_list" "rl" { - #включение удаленных rg в результат поиска - #опциональный параметр - #тип - булев тип - #по-умолчанию - false - #includedeleted = true - - #номер страницы для отображения - #опциональный параметр, тип - число - #если не задан - выводятся все доступные данные - #page = 2 - +data "decort_rg_list" "rg_list" { + #отображать удаленные ресурсные группы или нет + #необязательный параметр + #тип - булевый + includedeleted = false + + #номер страницы + #необязательный параметр + #тип - число + page = 1 + #размер страницы - #опциональный параметр, тип - число - #если не задан - выводятся все доступные данные - #size = 3 - + #необязательный параметр + #тип - число + size = 2 } -output "test" { - value = data.decort_rg_list.rl +output "output" { + value = data.decort_rg_list.rg_list } diff --git a/samples/cloudapi/data_rg_list_computes/main.tf b/samples/cloudapi/data_rg_list_computes/main.tf new file mode 100644 index 0000000..f7f09c2 --- /dev/null +++ b/samples/cloudapi/data_rg_list_computes/main.tf @@ -0,0 +1,40 @@ +/* +Пример использования +Получение информации о списке компьютов в ресурсной группе +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + 
required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_list_computes" "list_computes" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 + #причина для действия + #необязательный параметр + #тип - строка + reason = "TEST" +} + +output "output" { + value = data.decort_rg_list_computes.list_computes +} diff --git a/samples/cloudapi/data_rg_list_deleted/main.tf b/samples/cloudapi/data_rg_list_deleted/main.tf new file mode 100644 index 0000000..9d97505 --- /dev/null +++ b/samples/cloudapi/data_rg_list_deleted/main.tf @@ -0,0 +1,41 @@ +/* +Пример использования +Получение информации о списке удаленных ресурсных групп +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_list_deleted" "list_deleted" { + #номер страницы + #необязательный параметр + #тип - число + page = 1 + + #размер страницы + #необязательный параметр + #тип - число + size = 2 +} + +output "output" { + value = data.decort_rg_list_deleted.list_deleted +} diff --git a/samples/cloudapi/data_rg_list_lb/main.tf b/samples/cloudapi/data_rg_list_lb/main.tf new file mode 100644 index 0000000..df46dd6 --- /dev/null +++ b/samples/cloudapi/data_rg_list_lb/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение информации о списке балансировщиков в ресурсной группе +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_list_lb" "list_lb" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 +} + +output "output" { + value = data.decort_rg_list_lb.list_lb +} diff --git a/samples/cloudapi/data_rg_list_pfw/main.tf b/samples/cloudapi/data_rg_list_pfw/main.tf new file mode 100644 index 0000000..14dce5c --- /dev/null +++ b/samples/cloudapi/data_rg_list_pfw/main.tf @@ -0,0 +1,36 @@ +/* +Пример использования +Получение информации о списке правил переадресации портов для ресурсной группы. 
+*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_list_pfw" "list_pfw" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 +} + +output "output" { + value = data.decort_rg_list_pfw.list_pfw +} diff --git a/samples/cloudapi/data_rg_list_vins/main.tf b/samples/cloudapi/data_rg_list_vins/main.tf new file mode 100644 index 0000000..42892bc --- /dev/null +++ b/samples/cloudapi/data_rg_list_vins/main.tf @@ -0,0 +1,40 @@ +/* +Пример использования +Получение информации о списке винсов в ресурсной группе +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_list_vins" "list_vins" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 + #причина для действия + #необязательный параметр + #тип - строка + reason = "TEST" +} + +output "output" { + value = data.decort_rg_list_vins.list_vins +} diff --git a/samples/cloudapi/data_rg_usage/main.tf b/samples/cloudapi/data_rg_usage/main.tf new file mode 100644 index 0000000..e6cd73a --- /dev/null +++ b/samples/cloudapi/data_rg_usage/main.tf @@ -0,0 +1,40 @@ +/* +Пример использования +Получение информации об использовании ресурсов на ресурсной группе +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +/* terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} */ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_rg_usage" "rg_usage" { + #id ресурсной группы + #обязательный параметр + #тип - число + rg_id = 123 + #причина для действия + #необязательный параметр + #тип - строка + reason = "TEST" +} + +output "output" { + value = data.decort_rg_usage.rg_usage +} diff --git a/samples/cloudapi/resource_kvmvm/main.tf b/samples/cloudapi/resource_kvmvm/main.tf index c301db8..10e0922 100644 --- a/samples/cloudapi/resource_kvmvm/main.tf +++ b/samples/cloudapi/resource_kvmvm/main.tf @@ -67,6 +67,16 @@ resource "decort_kvmvm" "comp" { #тип - число boot_disk_size = 20 + #ID сепа для boot диска + #опциональный параметр + #тип - число + sep_id = 1 + + #Название пула + #опциональный параметр + #тип - строка + pool = "data02" + #описание compute #опциональный параметр #тип - строка @@ -114,84 +124,80 @@ resource "decort_kvmvm" "comp" { #опциональный параметр #тип - bool 
permanently = false + } #правила affinity #опциональный параметр #может быть один, несколько или ни одного блока #тип - блок - #affinity_rules { + affinity_rules { #тип правила #возможные значения - compute или node #обязательный параметр #тип - строка - #topology = "compute" + topology = "compute" #строгость правила #возможные значения - RECOMMENDED и REQUIRED #обязательный параметр #тип - строка - #policy = "RECOMMENDED" + policy = "RECOMMENDED" #режим проверки #возможные значения - ANY, EQ, NE #обязательный параметр #тип - строка - #mode = "ANY" + mode = "ANY" #ключ правила #обязательный параметр #тип строка - #key = "testkey" + key = "testkey" #ключ правила #обязательный параметр #тип строка - #value = "testvalue" - #} + value = "testvalue" + } #правила anti-affinity #опциональный параметр #может быть один, несколько или ни одного блока #тип - блок - #anti_affinity_rules { + anti_affinity_rules { #тип правила #возможные значения - compute или node #обязательный параметр #тип - строка - #topology = "compute" + topology = "compute" #строгость правила #возможные значения - RECOMMENDED и REQUIRED #обязательный параметр #тип - строка - #policy = "RECOMMENDED" + policy = "RECOMMENDED" #режим проверки #возможные значения - ANY, EQ, NE #обязательный параметр #тип - строка - #mode = "ANY" + mode = "ANY" #ключ правила #обязательный параметр #тип строка - #key = "testkey" + key = "testkey" #ключ правила #обязательный параметр #тип строка - #value = "testvalue" - #} - - #Флаг доступности компьюта для проведения с ним операций - #опицональный параметр - #тип - bool - #enable = true + value = "testvalue" + } #установка метки для вм #опциональный параметр #тип - строка - #affinity_label = "test4" + affinity_label = "test4" #наименование системы @@ -199,17 +205,182 @@ resource "decort_kvmvm" "comp" { #используется при создании вм #по умолчанию - не задан #тип - строка - #is="" + is="" #назначение вм #опциональный параметр #используется при создании вм #по умолчанию - не задан #тип - строка - #ipa_type = "" - } + ipa_type = "" + + #Id экстра дисков + #опциональный параметр + #тип - список чисел + extra_disks = [1234, 4322, 1344] + + #Присоеденения сетей и удаление сетей в компьюте + #опциональный параметр + #тип - блок + network { + #Тип сети VINS/EXTNET + #Обязательный параметр + #тип - строка + net_type = "VINS" + + #ID сети + #Обязательный параметр + #тип - число + net_id = 1234 + + #IP адрес входящий в сеть + #опциональный параметр + #тип - строка + ip_address = "127.0.0.1" + } + + #добавление и удаление тэгов + #опциональный параметр + #тип - блок + tags { + #Ключ для тэга + #Обязательный параметр + #тип - строка + key = "key" + + #Значения тэга + #Обязательный параметр + #тип - строка + value = "value" + } + + #добавление и удаление port forwarding + #опциональный параметр + #тип - блок + port_forwarding { + #номер внешнего начального порта для правила + #Обязательный параметр + #тип - число + public_port_start = 2023 + + #номер внешнего последнего порта для правила + #опциональный параметр + #тип - число + #по умолчанию - -1 + public_port_end = 2023 + + #номер внутреннего базового порта + #Обязательный параметр + #тип - число + local_port = 80 + + #сетевой протокол + #Обязательный параметр + #тип - строка + proto = "tcp" + } + + #предоставить/забрать пользователю доступ к компьюту + #опциональный параметр + #тип - блок + user_access { + #Имя юзера, которому предоставляем доступ + #Обязательный параметр + #тип - строка + username = "kasim_baybikov_1@decs3o" + + #Права: 'R' - только на чтение, 
'RCX' - чтение/запись, 'ARCXDU' - админ + #Обязательный параметр + #тип - строка + access_type = "ARCXDU" + } + + #Создать/удалить снапшот компьюта + #опциональный параметр + #тип - блок + snapshot { + #Лейбл снапшота + #Обязательный параметр + #тип - строка + label = "label1" + } + + #Rollback на нужный снапшот + #опциональный параметр + #Не имеет смысла при отсутствии снапшотов + #тип - блок + rollback { + #Лейбл снапшота + #Обязательный параметр + #тип - строка + label = "label1" + } + + #Вставить/удалить CD rom + #опциональный параметр + #Максимальное кол-во - 1 + #тип - блок + cd { + #ID образа диска CD rom + #Обязательный параметр + #тип - число + cdrom_id = 344 + } + + #Добавить компьют на стэк + #опциональный параметр + #тип - булев + pin_to_stack = true + + #Применяется только при создании нового экземпляра compute, игнорируется во всех остальных случаях + #опциональный параметр + #тип - строка + cloud_init = "" + + #Флаг доступности компьюта для проведения с ним операций + #опциональный параметр + #тип - булев + enabled = true + #pause/resume компьюта + #опциональный параметр + #тип - булев + pause = true + + #сброс (reset) компьюта + #опциональный параметр + #тип - булев + reset = true + + #флаг для редеплоя компьюта + #опциональный параметр + #тип - булев + auto_start = true + #флаг для редеплоя компьюта + #опциональный параметр + #тип - булев + force_stop = true + + #что делать с data-дисками при редеплое компьюта + #опциональный параметр + #тип - строка + data_disks = "KEEP" + + #запуск/стоп компьюта + #опциональный параметр + #тип - булев + started = true + + #detach диска при удалении компьюта + #опциональный параметр + #тип - булев + detach_disks = true + + #Флаг для удаления компьюта + #опциональный параметр + #тип - bool + permanently = false } output "test" { diff --git a/samples/cloudapi/resource_rg/main.tf b/samples/cloudapi/resource_rg/main.tf new file mode 100644 index 0000000..3edc9db --- /dev/null +++ b/samples/cloudapi/resource_rg/main.tf @@ -0,0 +1,170 @@ +/* +Пример использования +ресурса RG +Ресурс позволяет: +1. Создавать +2. Редактировать +3. Удалять +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +resource "decort_resgroup" "rg" { + #имя ресурсной группы + #обязательный параметр + #тип - строка + name = "testing_rg_1" + + #id аккаунта, которому будет принадлежать ресурсная группа + #обязательный параметр + #тип - число + account_id = 123 + + #id площадки (grid id) + #обязательный параметр + #тип - число + gid = 1234 + + #тип сети по умолчанию для этой ресурсной группы. + #виртуальные машины, созданные в этой RG, по умолчанию будут подключены к этой сети. + #Допустимые значения: PRIVATE, PUBLIC, NONE. 
+ #необязательный параметр + #тип - строка + def_net_type = "NONE" + + #ip cidr частной сети, если сеть по умолчанию PRIVATE + #необязательный параметр + #тип - строка + ipcidr = "1.1.1.1" + + #id внешней сети + #необязательный параметр + #тип - число + ext_net_id = 123 + + #ip внешней сети + #необязательный параметр + #тип - строка + ext_ip = "1.1.1.1" + + #причина выполнения + #необязательный параметр + #тип - строка + reason = "TEST" + + #описание + #необязательный параметр + #тип - строка + description = "qwerty" + + #флаг доступности ресурсной группы + #необязательный параметр + #тип - булевый + enable = true + + #блок для предоставления прав на ресурсную группу + #необязательный параметр + #тип - блок + access { + #имя пользователя, которому предоставляются права + #обязательный параметр при использовании блока + #тип - строка + user = "kasim_baybikov_1@decs3o" + #тип прав + #необязательный параметр + #тип - строка + right = "RCX" + } + + #Установить сеть по умолчанию + #необязательный параметр + #тип - блок + #при добавлении блока удалять его нельзя + def_net { + #тип сети + #обязательный параметр при использовании блока + #тип - строка + net_type = "PUBLIC" + #id сети + #идентификатор сегмента сети. Если net_type = PUBLIC, а net_id = 0, + #то будет выбран сегмент внешней сети по умолчанию. Если net_type + #имеет значение PRIVATE и net_id=0, будет выбран первый vins, определенный для этой ресурсной группы. + #В противном случае net_id идентифицирует либо существующий сегмент внешней сети, либо vins. + #необязательный параметр + #тип - число + net_id = 1234 + #причина выполнения + #необязательный параметр + #тип - строка + reason = "TEST" + } + + #лимиты ресурсов для ресурсной группы + #необязательный параметр + #тип - блок + quota { + #максимальное количество ядер процессора + #необязательный параметр + #тип - число + #значение по умолчанию: -1 + cpu = 5 + #максимальный размер памяти в мегабайтах + #необязательный параметр + #тип - число + #значение по умолчанию: -1 + ram = 1024 + #максимальный размер объединенных виртуальных дисков в ГБ + #необязательный параметр + #тип - число + #значение по умолчанию: -1 + disk = 180 + #максимальный объем отправленного/полученного внешнего трафика + #необязательный параметр + #тип - число + #значение по умолчанию: -1 + ext_traffic = 21 + #максимальное количество назначенных общедоступных IP-адресов + #необязательный параметр + #тип - число + #значение по умолчанию: -1 + ext_ips = 29 + } + + #флаг для принудительного удаления ресурсной группы + #необязательный параметр + #тип - булевый + force = true + + #флаг для безвозвратного удаления ресурсной группы + #необязательный параметр + #тип - булевый + permanently = true + +} + + +output "output" { + value = decort_resgroup.rg +} +
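A minimal follow-up sketch (outside the patch itself): it shows how the resource group created in samples/cloudapi/resource_rg could feed the new decort_rg_usage data source from samples/cloudapi/data_rg_usage. It assumes decort_resgroup exposes the created group's numeric ID through Terraform's standard string `id` attribute, hence the tonumber() conversion; the name created_rg_usage is illustrative only.

data "decort_rg_usage" "created_rg_usage" {
  # rg_id is required; assumption: the resource's Terraform ID holds the RG ID as a string
  rg_id = tonumber(decort_resgroup.rg.id)

  # optional reason, mirroring the data_rg_usage sample
  reason = "TEST"
}

output "created_rg_usage" {
  value = data.decort_rg_usage.created_rg_usage
}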