Compare commits

10 Commits

| Author | SHA1 | Date |
|---|---|---|
| | af82decadd | |
| | f5e0a53364 | |
| | 9d1c8eeaa7 | |
| | 8516e0419a | |
| | e12afbe1ad | |
| | c0c9dc8131 | |
| | c3bc6ef5da | |
| | 4d865ae921 | |
| | a355247845 | |
| | be86069155 | |
.github/workflows/release.yml (vendored, 2 changes)

@@ -2,7 +2,7 @@ name: Release
on:
  push:
    tags:
-      - 'v*'
+      - '*'

jobs:
  release:
CHANGELOG.md (58 changes)

@@ -1,10 +1,56 @@
-### Version 3.2.2
+### Version 3.5.0

-### Bug fixes
+## Features

-- Fix bug with getting kvmvm data_source
+#### Resgroup
+- Add data source rg_affinity_group_computes
+- Add data source rg_affinity_groups_get
+- Add data source rg_affinity_groups_list
+- Add data source rg_audits
+- Add data source rg_list
+- Add data source rg_list_computes
+- Add data source rg_list_deleted
+- Add data source rg_list_lb
+- Add data source rg_list_pfw
+- Add data source rg_list_vins
+- Add data source rg_usage
+- Update data source rg
+- Update block 'qouta' to change resource limits
+- Add block 'access' to access/revoke rights for rg
+- Add block 'def_net' to set default network in rg
+- Add field 'enable' to disable/enable rg
+- Add processing of input parameters (account_id, gid, ext_net_id) when creating and updating a resource

-### Features
+#### Kvmvm
+- Update data source decort_kvmvm
+- Add data source decort_kvmvm_list
+- Add data source decort_kvmvm_audits
+- Add data source decort_kvmvm_get_audits
+- Add data source decort_kvmvm_get_console_url
+- Add data source decort_kvmvm_get_log
+- Add data source decort_kvmvm_pfw_list
+- Add data source decort_kvmvm_user_list
+- Update block 'disks' in the resource decort_kvmvm
+- Add block 'tags' to add/delete tags
+- Add block 'port_forwarding' to add/delete pfws
+- Add block 'user_access' to access/revoke user rights for comptue
+- Add block 'snapshot' to create/delete snapshots
+- Add block 'rollback' to rollback in snapshot
+- Add block 'cd' to insert/Eject cdROM disks
+- Add field 'pin_to_stack' to pin compute to stack
+- Add field 'pause' to pause/resume compute
+- Add field 'reset' to reset compute
+- Add the ability to redeploy the compute when changing the image_id
+- Add field 'data_disks' to redeploy compute
+- Add field 'auto_start' to redeploy compute
+- Add field 'force_stop' to redeploy compute
+- Add warnings in Create resource decort_kvmvm
+- Add processing of input parameters (rg_id, image_id and all vins_id's in blocks 'network') when creating and updating a resource

-- Add enable/disable functionality for kvmvm resource
-- Add status checker for kvmvm resource
+## Bug Fix

+- When deleting the 'quote' block, the limits are not set to the default value
+- Block 'disks' in resource decort_kvmvm breaks the state
+- Import decort_resgroup resource breaks the state
+- Import decort_kvmvm resource breaks the state
+- If the boot_disk_size is not specified at creation, further changing it leads to an error
Dockerfile (10 changes)

@@ -1,10 +0,0 @@
-FROM docker.io/hashicorp/terraform:latest
-
-WORKDIR /opt/decort/tf/
-COPY provider.tf ./
-COPY terraform-provider-decort ./terraform.d/plugins/digitalenergy.online/decort/decort/3.2.2/linux_amd64/
-RUN terraform init
-
-WORKDIR /tf
-COPY entrypoint.sh /
-ENTRYPOINT ["/entrypoint.sh", "/bin/terraform"]
@@ -1,52 +0,0 @@
-pipeline {
-  agent {
-    kubernetes {
-      yaml '''
-apiVersion: v1
-kind: Pod
-spec:
-  containers:
-  - name: alpine
-    image: alpine:3.15
-    command:
-    - sleep
-    - infinity
-'''
-    }
-  }
-  stages {
-    stage('Dependency check') {
-      environment {
-        DEPCHECKDB = credentials('depcheck-postgres')
-      }
-      steps {
-        container('alpine') {
-          sh 'apk update && apk add openjdk11 java-postgresql-jdbc go'
-          dependencyCheck additionalArguments: '-f JSON -f HTML -n --enableExperimental \
-            -l deplog \
-            --dbDriverName org.postgresql.Driver \
-            --dbDriverPath /usr/share/java/postgresql-jdbc.jar \
-            --dbUser $DEPCHECKDB_USR \
-            --dbPassword $DEPCHECKDB_PSW \
-            --connectionString jdbc:postgresql://postgres-postgresql.postgres/depcheck', odcInstallation: 'depcheck'
-          sh 'cat deplog'
-        }
-      }
-    }
-    stage('SonarQube analysis') {
-      environment {
-        SONARSCANNER_HOME = tool 'sonarscanner'
-      }
-      steps {
-        withSonarQubeEnv('sonarqube') {
-          sh '$SONARSCANNER_HOME/bin/sonar-scanner'
-        }
-      }
-    }
-    stage('SonarQube quality gate') {
-      steps {
-        waitForQualityGate webhookSecretId: 'sonar-webhook', abortPipeline: true
-      }
-    }
-  }
-}
Makefile (2 changes)

@@ -52,4 +52,4 @@ test:
	echo $(TEST) | xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4

testacc:
-	TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
+	TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
README.md (13 changes)

@@ -2,12 +2,15 @@

Terraform provider for the Digital Energy Cloud Orchestration Technology (DECORT) platform

-Note: provider version 3.x is designed for DECORT API 3.8.x.
-For older versions you can use:
+## Mapping of platform versions to provider versions

-- DECORT API 3.7.x - provider version rc-1.25
-- DECORT API 3.6.x - provider version rc-1.10
-- DECORT API before 3.6.0 - terraform DECS provider (https://github.com/rudecs/terraform-provider-decs)
+| DECORT API version | Terraform provider version |
+| ------ | ------ |
+| 3.8.5 | 3.4.x |
+| 3.8.0 - 3.8.4 | 3.3.1 |
+| 3.7.x | rc-1.25 |
+| 3.6.x | rc-1.10 |
+| before 3.6.0 | [terraform-provider-decs](https://github.com/rudecs/terraform-provider-decs) |

## Working modes
README_EN.md (12 changes)

@@ -2,11 +2,15 @@

Terraform provider for Digital Energy Cloud Orchestration Technology (DECORT) platform

-NOTE: provider 3.x is designed for DECORT API 3.8.x. For older API versions please use:
+## Mapping of platform versions with provider versions

-- DECORT API 3.7.x versions - provider verion rc-1.25
-- DECORT API 3.6.x versions - provider version rc-1.10
-- DECORT API versions prior to 3.6.0 - Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs)
+| DECORT API version | Terraform provider version |
+| ------ | ------ |
+| 3.8.5 | 3.4.x |
+| 3.8.0 - 3.8.4 | 3.3.1 |
+| 3.7.x | rc-1.25 |
+| 3.6.x | rc-1.10 |
+| prior to 3.6.0 | [terraform-provider-decs](https://github.com/rudecs/terraform-provider-decs) |

## Working modes
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-cp -aL /opt/decort/tf/* /opt/decort/tf/.* ./
-exec "$@"
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
+Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change in another file: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)
internal/dc/warnings.go (new file, 53 additions)

@@ -0,0 +1,53 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

// Diagnostics Collector
package dc

import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

type Warnings struct {
	diagnostics diag.Diagnostics
}

func (w *Warnings) Add(err error) {
	if w.diagnostics == nil {
		w.diagnostics = diag.Diagnostics{}
	}
	diagFromErr := diag.FromErr(err)
	diagFromErr[0].Severity = diag.Warning
	w.diagnostics = append(w.diagnostics, diagFromErr[0])
}

func (w Warnings) Get() diag.Diagnostics {
	return w.diagnostics
}
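The new dc.Warnings type is a small diagnostics collector: a resource function can record recoverable errors as warning-level diagnostics and keep going, which is how resourceDiskRead uses it further down in this diff. A minimal usage sketch follows; the surrounding function and the restoreIfDeleted helper are hypothetical, only the dc.Warnings API itself comes from the file above.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/rudecs/terraform-provider-decort/internal/dc"
)

// readSomething illustrates the collector pattern: recoverable errors are
// recorded as warning-level diagnostics and returned together at the end,
// instead of aborting on the first failure.
func readSomething() diag.Diagnostics {
	warnings := dc.Warnings{}

	if err := restoreIfDeleted(); err != nil {
		warnings.Add(err) // stored with Severity = diag.Warning, execution continues
	}

	// ... populate the Terraform state as usual ...

	return warnings.Get() // nil when nothing was added
}

// restoreIfDeleted is a hypothetical stand-in for a recoverable API call.
func restoreIfDeleted() error { return fmt.Errorf("disk restore skipped") }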
@@ -40,16 +40,39 @@ func NewDataSourcesMap() map[string]*schema.Resource {
"decort_account": account.DataSourceAccount(),
"decort_resgroup": rg.DataSourceResgroup(),
"decort_kvmvm": kvmvm.DataSourceCompute(),
"decort_kvmvm_list": kvmvm.DataSourceComputeList(),
"decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(),
"decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(),
"decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(),
"decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(),
"decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(),
"decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(),
"decort_k8s": k8s.DataSourceK8s(),
"decort_k8s_list": k8s.DataSourceK8sList(),
"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
"decort_k8s_wg": k8s.DataSourceK8sWg(),
"decort_k8s_wg_list": k8s.DataSourceK8sWgList(),
"decort_vins": vins.DataSourceVins(),
"decort_vins_list": vins.DataSourceVinsList(),
"decort_vins_audits": vins.DataSourceVinsAudits(),
"decort_vins_ip_list": vins.DataSourceVinsIpList(),
"decort_vins_list_deleted": vins.DataSourceVinsListDeleted(),
"decort_vins_ext_net_list": vins.DataSourceVinsExtNetList(),
"decort_vins_nat_rule_list": vins.DataSourceVinsNatRuleList(),
"decort_snapshot_list": snapshot.DataSourceSnapshotList(),
"decort_disk": disks.DataSourceDisk(),
"decort_disk_list": disks.DataSourceDiskList(),
"decort_rg_list": rg.DataSourceRgList(),
"decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(),
"decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(),
"decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(),
"decort_rg_audits": rg.DataSourceRgAudits(),
"decort_rg_list_computes": rg.DataSourceRgListComputes(),
"decort_rg_list_deleted": rg.DataSourceRgListDeleted(),
"decort_rg_list_lb": rg.DataSourceRgListLb(),
"decort_rg_list_pfw": rg.DataSourceRgListPfw(),
"decort_rg_list_vins": rg.DataSourceRgListVins(),
"decort_rg_usage": rg.DataSourceRgUsage(),
"decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(),
"decort_disk_list_types": disks.DataSourceDiskListTypes(),
"decort_disk_list_deleted": disks.DataSourceDiskListDeleted(),

@@ -77,7 +100,6 @@ func NewDataSourcesMap() map[string]*schema.Resource {
"decort_extnet_computes_list": extnet.DataSourceExtnetComputesList(),
"decort_extnet": extnet.DataSourceExtnet(),
"decort_extnet_default": extnet.DataSourceExtnetDefault(),
-"decort_vins_list": vins.DataSourceVinsList(),
"decort_locations_list": locations.DataSourceLocationsList(),
"decort_location_url": locations.DataSourceLocationUrl(),
"decort_image_list": image.DataSourceImageList(),
@@ -141,6 +141,22 @@ func flattenAccResources(r Resources) []map[string]interface{} {
	return res
}

+func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} {
+	res := make([]map[string]interface{}, 0)
+	for sepKey, sepVal := range seps {
+		for dataKey, dataVal := range sepVal {
+			temp := map[string]interface{}{
+				"sep_id":        sepKey,
+				"data_name":     dataKey,
+				"disk_size":     dataVal.DiskSize,
+				"disk_size_max": dataVal.DiskSizeMax,
+			}
+			res = append(res, temp)
+		}
+	}
+	return res
+}
+
func flattenAccResource(r Resource) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	temp := map[string]interface{}{
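To make the shape of the new seps data concrete, here is a small, self-contained sketch of the nested map the flattener consumes and the flat records it emits. The sample values are invented for illustration; only the ResourceSep field names and types come from the account models change later in this diff.

package main

import "fmt"

// ResourceSep mirrors the struct introduced in the account models:
// per-SEP, per-pool disk usage reported by the platform.
type ResourceSep struct {
	DiskSize    float64 `json:"disksize"`
	DiskSizeMax int     `json:"disksizemax"`
}

func main() {
	// Hypothetical payload: SEP id -> data/pool name -> usage.
	seps := map[string]map[string]ResourceSep{
		"1": {"vmstor": {DiskSize: 10.5, DiskSizeMax: 100}},
	}

	// flattenAccountSeps produces one record per (sep_id, data_name) pair,
	// which is what the new "seps" list schema expects.
	for sepID, pools := range seps {
		for dataName, usage := range pools {
			fmt.Printf("sep_id=%s data_name=%s disk_size=%g disk_size_max=%d\n",
				sepID, dataName, usage.DiskSize, usage.DiskSizeMax)
		}
	}
}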
@@ -150,6 +166,7 @@ func flattenAccResource(r Resource) []map[string]interface{} {
"exttraffic": r.Exttraffic,
"gpu": r.GPU,
"ram": r.RAM,
+"seps": flattenAccountSeps(r.SEPs),
}
res = append(res, temp)
return res

@@ -161,6 +178,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},

"dc_location": {
Type: schema.TypeString,
Computed: true,

@@ -180,7 +198,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Computed: true,
},
"disksize": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
},
"extips": {

@@ -199,6 +217,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"seps": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"sep_id": {Type: schema.TypeString, Computed: true},
+"data_name": {Type: schema.TypeString, Computed: true},
+"disk_size": {Type: schema.TypeFloat, Computed: true},
+"disk_size_max": {Type: schema.TypeInt, Computed: true},
+},
+},
+},
},
},

@@ -212,7 +254,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Computed: true,
},
"disksize": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
},
"extips": {

@@ -231,6 +273,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"seps": { ... the same nested "seps" list schema as above (sep_id, data_name, disk_size, disk_size_max) ... },
},
},
@@ -47,6 +47,7 @@ func flattenAccountDisksList(adl AccountDisksList) []map[string]interface{} {
"disk_name": ad.Name,
"pool": ad.Pool,
"sep_id": ad.SepId,
+"shareable": ad.Shareable,
"size_max": ad.SizeMax,
"type": ad.Type,
}

@@ -98,6 +99,10 @@ func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"shareable": {
+Type: schema.TypeBool,
+Computed: true,
+},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -74,12 +74,41 @@ func flattenAccRGComputes(argc AccountRGComputes) []map[string]interface{} {
return res
}

+func flattenAccResourceHack(r ResourceHack) []map[string]interface{} {
+res := make([]map[string]interface{}, 0)
+temp := map[string]interface{}{
+"cpu": r.CPU,
+"disksize": r.Disksize,
+"extips": r.Extips,
+"exttraffic": r.Exttraffic,
+"gpu": r.GPU,
+"ram": r.RAM,
+//"seps": flattenAccountSeps(r.SEPs),
+}
+res = append(res, temp)
+return res
+}
+
+func flattenAccResourceRg(r Resource) []map[string]interface{} {
+res := make([]map[string]interface{}, 0)
+temp := map[string]interface{}{
+"cpu": r.CPU,
+"disksize": r.Disksize,
+"extips": r.Extips,
+"exttraffic": r.Exttraffic,
+"gpu": r.GPU,
+"ram": r.RAM,
+}
+res = append(res, temp)
+return res
+}
+
func flattenAccRGResources(argr AccountRGResources) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
-"consumed": flattenAccResource(argr.Consumed),
-"limits": flattenAccResource(argr.Limits),
-"reserved": flattenAccResource(argr.Reserved),
+"consumed": flattenAccResourceRg(argr.Consumed),
+"limits": flattenAccResourceHack(argr.Limits),
+"reserved": flattenAccResourceRg(argr.Reserved),
}
res = append(res, temp)
return res
@@ -89,13 +89,19 @@ type AccountCloudApi struct {

type AccountCloudApiList []AccountCloudApi

+type ResourceSep struct {
+DiskSize float64 `json:"disksize"`
+DiskSizeMax int `json:"disksizemax"`
+}
+
type Resource struct {
-CPU int `json:"cpu"`
-Disksize int `json:"disksize"`
-Extips int `json:"extips"`
-Exttraffic int `json:"exttraffic"`
-GPU int `json:"gpu"`
-RAM int `json:"ram"`
+CPU int `json:"cpu"`
+Disksize float64 `json:"disksize"`
+Extips int `json:"extips"`
+Exttraffic int `json:"exttraffic"`
+GPU int `json:"gpu"`
+RAM int `json:"ram"`
+SEPs map[string]map[string]ResourceSep `json:"seps"`
}

type Resources struct {

@@ -147,12 +153,13 @@ type AccountCompute struct {
type AccountComputesList []AccountCompute

type AccountDisk struct {
-ID int `json:"id"`
-Name string `json:"name"`
-Pool string `json:"pool"`
-SepId int `json:"sepId"`
-SizeMax int `json:"sizeMax"`
-Type string `json:"type"`
+ID int `json:"id"`
+Name string `json:"name"`
+Pool string `json:"pool"`
+SepId int `json:"sepId"`
+Shareable bool `json:"shareable"`
+SizeMax int `json:"sizeMax"`
+Type string `json:"type"`
}

type AccountDisksList []AccountDisk

@@ -194,10 +201,19 @@ type AccountRGComputes struct {
Stopped int `json:"Stopped"`
}

+type ResourceHack struct {
+CPU int `json:"cpu"`
+Disksize float64 `json:"disksize"`
+Extips int `json:"extips"`
+Exttraffic int `json:"exttraffic"`
+GPU int `json:"gpu"`
+RAM int `json:"ram"`
+}
+
type AccountRGResources struct {
-Consumed Resource `json:"Consumed"`
-Limits Resource `json:"Limits"`
-Reserved Resource `json:"Reserved"`
+Consumed Resource `json:"Consumed"`
+Limits ResourceHack `json:"Limits"`
+Reserved Resource `json:"Reserved"`
}

type AccountRG struct {
@@ -574,7 +574,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Computed: true,
},
"disksize": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
},
"extips": {

@@ -593,6 +593,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"seps": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"sep_id": {Type: schema.TypeString, Computed: true},
+"data_name": {Type: schema.TypeString, Computed: true},
+"disk_size": {Type: schema.TypeFloat, Computed: true},
+"disk_size_max": {Type: schema.TypeInt, Computed: true},
+},
+},
+},
},
},

@@ -606,7 +630,7 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Computed: true,
},
"disksize": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
},
"extips": {

@@ -625,6 +649,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"seps": { ... the same nested "seps" list schema as above (sep_id, data_name, disk_size, disk_size_max) ... },
},
},
@@ -47,4 +47,6 @@ const (

disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete"
disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
+disksShareAPI = "/restmachine/cloudapi/disks/share"
+disksUnshareAPI = "/restmachine/cloudapi/disks/unshare"
)
@@ -60,8 +60,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
-d.Set("compute_id", disk.ComputeID)
-d.Set("compute_name", disk.ComputeName)
+d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)

@@ -84,6 +83,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool)
+d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)

@@ -93,6 +93,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("role", disk.Role)
d.Set("sep_id", disk.SepID)
d.Set("sep_type", disk.SepType)
+d.Set("shareable", disk.Shareable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))

@@ -130,15 +131,21 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
-"compute_id": {
-Type: schema.TypeInt,
-Computed: true,
-Description: "Compute ID",
-},
-"compute_name": {
-Type: schema.TypeString,
-Computed: true,
-Description: "Compute name",
+"computes": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"compute_id": {Type: schema.TypeString, Computed: true},
+"compute_name": {Type: schema.TypeString, Computed: true},
+},
+},
},
"created_time": {
Type: schema.TypeInt,

@@ -316,6 +323,13 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
+"present_to": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeInt,
+},
+},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,

@@ -361,13 +375,17 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
},
+"shareable": {
+Type: schema.TypeBool,
+Computed: true,
+},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
@@ -42,6 +42,18 @@ import (
"github.com/rudecs/terraform-provider-decort/internal/constants"
)

+func flattenDiskComputes(computes map[string]string) []map[string]interface{} {
+res := make([]map[string]interface{}, 0)
+for computeKey, computeVal := range computes {
+temp := map[string]interface{}{
+"compute_id": computeKey,
+"compute_name": computeVal,
+}
+res = append(res, temp)
+}
+return res
+}
+
func flattenIOTune(iot IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{

@@ -72,9 +84,8 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
+"computes": flattenDiskComputes(disk.Computes),
"boot_partition": disk.BootPartition,
-"compute_id": disk.ComputeID,
-"compute_name": disk.ComputeName,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Desc,

@@ -99,6 +110,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"passwd": disk.Passwd,
"pci_slot": disk.PciSlot,
"pool": disk.Pool,
+"present_to": disk.PresentTo,
"purge_attempts": disk.PurgeAttempts,
"purge_time": disk.PurgeTime,
"reality_device_number": disk.RealityDeviceNumber,

@@ -108,6 +120,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"role": disk.Role,
"sep_id": disk.SepID,
"sep_type": disk.SepType,
+"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"snapshots": flattenDiskSnapshotList(disk.Snapshots),

@@ -199,15 +212,21 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
-"compute_id": {
-Type: schema.TypeInt,
-Computed: true,
-Description: "Compute ID",
-},
-"compute_name": {
-Type: schema.TypeString,
-Computed: true,
-Description: "Compute name",
+"computes": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"compute_id": {Type: schema.TypeString, Computed: true},
+"compute_name": {Type: schema.TypeString, Computed: true},
+},
+},
},
"created_time": {
Type: schema.TypeInt,

@@ -400,6 +419,13 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
+"present_to": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeInt,
+},
+},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,

@@ -445,13 +471,17 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
},
+"shareable": {
+Type: schema.TypeBool,
+Computed: true,
+},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
@@ -415,7 +415,7 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
Description: "Size in GB",
},
"size_used": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
@@ -37,9 +37,8 @@ type Disk struct {
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
BootPartition int `json:"bootPartition"`
+Computes map[string]string `json:"computes"`
CreatedTime uint64 `json:"creationTime"`
-ComputeID int `json:"computeId"`
-ComputeName string `json:"computeName"`
DeletedTime uint64 `json:"deletionTime"`
DeviceName string `json:"devicename"`
Desc string `json:"desc"`

@@ -63,6 +62,7 @@ type Disk struct {
ParentId int `json:"parentId"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
+PresentTo []int `json:"presentTo"`
PurgeTime uint64 `json:"purgeTime"`
PurgeAttempts uint64 `json:"purgeAttempts"`
RealityDeviceNumber int `json:"realityDeviceNumber"`

@@ -71,9 +71,10 @@ type Disk struct {
ResName string `json:"resName"`
Role string `json:"role"`
SepType string `json:"sepType"`
+Shareable bool `json:"shareable"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output
SizeMax int `json:"sizeMax"`
-SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
+SizeUsed float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
Snapshots []Snapshot `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`

@@ -164,7 +165,7 @@ type Unattached struct {
Role string `json:"role"`
SepID int `json:"sepId"`
SizeMax int `json:"sizeMax"`
-SizeUsed int `json:"sizeUsed"`
+SizeUsed float64 `json:"sizeUsed"`
Snapshots []Snapshot `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`

@@ -173,3 +174,8 @@ type Unattached struct {
}

type UnattachedList []Unattached
+
+type Pair struct {
+intPort int
+extPortStart int
+}
@@ -42,6 +42,7 @@ import (

"github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller"
+"github.com/rudecs/terraform-provider-decort/internal/dc"
"github.com/rudecs/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus"

@@ -112,6 +113,15 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{}
}

+if shareable := d.Get("shareable"); shareable.(bool) == true {
+urlValues.Add("diskId", diskId)
+_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
+if err != nil {
+return diag.FromErr(err)
+}
+urlValues = &url.Values{}
+}
+
dgn := resourceDiskRead(ctx, d, m)
if dgn != nil {
return dgn

@@ -123,6 +133,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
+warnings := dc.Warnings{}

disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {

@@ -133,17 +144,21 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
return nil
}

+hasChangeState := false
if disk.Status == status.Destroyed || disk.Status == status.Purged {
+d.Set("disk_id", 0)
return resourceDiskCreate(ctx, d, m)
} else if disk.Status == status.Deleted {
+hasChangeState = true
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))

_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if err != nil {
-return diag.FromErr(err)
+warnings.Add(err)
}
}
if hasChangeState {
urlValues = &url.Values{}
disk, err = utilityDiskCheckPresence(ctx, d, m)
if disk == nil {

@@ -161,8 +176,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
-d.Set("compute_id", disk.ComputeID)
-d.Set("compute_name", disk.ComputeName)
+d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)

@@ -185,6 +199,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool)
+d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)

@@ -196,13 +211,14 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("sep_type", disk.SepType)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
+d.Set("shareable", disk.Shareable)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("tech_status", disk.TechStatus)
d.Set("type", disk.Type)
d.Set("vmid", disk.VMID)

-return nil
+return warnings.Get()
}

func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {

@@ -215,18 +231,6 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
return nil
}
-if disk.Status == status.Destroyed || disk.Status == status.Purged {
-return resourceDiskCreate(ctx, d, m)
-} else if disk.Status == status.Deleted {
-urlValues.Add("diskId", d.Id())
-urlValues.Add("reason", d.Get("reason").(string))
-
-_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
-if err != nil {
-return diag.FromErr(err)
-}
-urlValues = &url.Values{}
-}

if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max")

@@ -283,6 +287,24 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{}
}

+if d.HasChange("shareable") {
+oldShare, newShare := d.GetChange("shareable")
+urlValues = &url.Values{}
+urlValues.Add("diskId", d.Id())
+if oldShare.(bool) == false && newShare.(bool) == true {
+_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
+if err != nil {
+return diag.FromErr(err)
+}
+}
+if oldShare.(bool) == true && newShare.(bool) == false {
+_, err := c.DecortAPICall(ctx, "POST", disksUnshareAPI, urlValues)
+if err != nil {
+return diag.FromErr(err)
+}
+}
+}
+
return resourceDiskRead(ctx, d, m)
}
@@ -341,6 +363,13 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
+"present_to": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeInt,
+},
+},
"sep_id": {
Type: schema.TypeInt,
Optional: true,

@@ -360,7 +389,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
-
"detach": {
Type: schema.TypeBool,
Optional: true,

@@ -379,6 +407,11 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: "",
Description: "Reason for deletion",
},
+"shareable": {
+Type: schema.TypeBool,
+Optional: true,
+Computed: true,
+},

"disk_id": {
Type: schema.TypeInt,

@@ -399,15 +432,21 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
-"compute_id": {
-Type: schema.TypeInt,
-Computed: true,
-Description: "Compute ID",
-},
-"compute_name": {
-Type: schema.TypeString,
-Computed: true,
-Description: "Compute name",
+"computes": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"compute_id": {Type: schema.TypeString, Computed: true},
+"compute_name": {Type: schema.TypeString, Computed: true},
+},
+},
},
"created_time": {
Type: schema.TypeInt,

@@ -622,7 +661,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
},
"size_used": {
-Type: schema.TypeInt,
+Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change in another file: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)

@@ -80,6 +81,7 @@ func flattenImage(d *schema.ResourceData, img *ImageExtend) {
d.Set("pool_name", img.Pool)
d.Set("provider_name", img.ProviderName)
d.Set("purge_attempts", img.PurgeAttempts)
+d.Set("present_to", img.PresentTo)
d.Set("res_id", img.ResId)
d.Set("rescuecd", img.RescueCD)
d.Set("sep_id", img.SepId)
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change in another file: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)

@@ -161,6 +162,13 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
+"present_to": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeInt,
+},
+},
"res_id": {
Type: schema.TypeString,
Computed: true,
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(the same author-list addition is applied to three more files here: each hunk adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the license header)
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(identical license-header change: adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the Authors list)

@@ -134,6 +135,7 @@ type ImageExtend struct {
Password string `json:"password"`
Pool string `json:"pool"`
ProviderName string `json:"provider_name"`
+PresentTo []int `json:"presentTo"`
PurgeAttempts int `json:"purgeAttempts"`
ResId string `json:"resId"`
RescueCD bool `json:"rescuecd"`
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
(the same author-list addition is applied to five more files here: each hunk adds "Kasim Baybikov, <kmbaybikov@basistech.ru>" to the license header)
@@ -1,3 +1,35 @@
(three consecutive hunks: three k8s source files each gain the standard Apache 2.0 copyright/license header and the DECORT provider preamble shown above in internal/dc/warnings.go, inserted ahead of "package k8s" and its imports)
@@ -1,3 +1,35 @@
(this file also gains the standard Apache 2.0 license header and DECORT provider preamble, inserted ahead of "package k8s")

@@ -11,19 +43,6 @@ import (
log "github.com/sirupsen/logrus"
)

-func flattenWgData(d *schema.ResourceData, wg K8SGroup, computes []kvmvm.ComputeGetResp) {
-d.Set("annotations", wg.Annotations)
-d.Set("cpu", wg.CPU)
-d.Set("detailed_info", flattenDetailedInfo(wg.DetailedInfo, computes))
-d.Set("disk", wg.Disk)
-d.Set("guid", wg.GUID)
-d.Set("labels", wg.Labels)
-d.Set("name", wg.Name)
-d.Set("num", wg.Num)
-d.Set("ram", wg.RAM)
-d.Set("taints", wg.Taints)
-}
-
func dataSourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("dataSourceK8sWgRead: called with k8s id %d", d.Get("k8s_id").(int))
@@ -1,3 +1,35 @@
(this file also gains the standard Apache 2.0 license header and DECORT provider preamble, inserted ahead of "package k8s")

@@ -13,33 +45,6 @@ import (
"github.com/rudecs/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)

-func flattenWgList(wgList K8SGroupList, computesMap map[uint64][]kvmvm.ComputeGetResp) []map[string]interface{} {
-res := make([]map[string]interface{}, 0)
-for _, wg := range wgList {
-computes := computesMap[wg.ID]
-temp := map[string]interface{}{
-"annotations": wg.Annotations,
-"cpu": wg.CPU,
-"wg_id": wg.ID,
-"detailed_info": flattenDetailedInfo(wg.DetailedInfo, computes),
-"disk": wg.Disk,
-"guid": wg.GUID,
-"labels": wg.Labels,
-"name": wg.Name,
-"num": wg.Num,
-"ram": wg.RAM,
-"taints": wg.Taints,
-}
-
-res = append(res, temp)
-}
-return res
-}
-
-func flattenItemsWg(d *schema.ResourceData, wgList K8SGroupList, computes map[uint64][]kvmvm.ComputeGetResp) {
-d.Set("items", flattenWgList(wgList, computes))
-}
-
func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (K8SGroupList, error) {

c := m.(*controller.ControllerCfg)
@@ -1,3 +1,35 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://github.com/rudecs/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
@@ -245,3 +277,43 @@ func flattenResourceK8s(d *schema.ResourceData, k8s K8SRecord, masters []kvmvm.C
|
||||
d.Set("updated_time", k8s.UpdatedTime)
|
||||
d.Set("default_wg_id", k8s.K8SGroups.Workers[0].ID)
|
||||
}
|
||||
|
||||
func flattenWgData(d *schema.ResourceData, wg K8SGroup, computes []kvmvm.ComputeGetResp) {
|
||||
d.Set("annotations", wg.Annotations)
|
||||
d.Set("cpu", wg.CPU)
|
||||
d.Set("detailed_info", flattenDetailedInfo(wg.DetailedInfo, computes))
|
||||
d.Set("disk", wg.Disk)
|
||||
d.Set("guid", wg.GUID)
|
||||
d.Set("labels", wg.Labels)
|
||||
d.Set("name", wg.Name)
|
||||
d.Set("num", wg.Num)
|
||||
d.Set("ram", wg.RAM)
|
||||
d.Set("taints", wg.Taints)
|
||||
}
|
||||
|
||||
func flattenWgList(wgList K8SGroupList, computesMap map[uint64][]kvmvm.ComputeGetResp) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, wg := range wgList {
|
||||
computes := computesMap[wg.ID]
|
||||
temp := map[string]interface{}{
|
||||
"annotations": wg.Annotations,
|
||||
"cpu": wg.CPU,
|
||||
"wg_id": wg.ID,
|
||||
"detailed_info": flattenDetailedInfo(wg.DetailedInfo, computes),
|
||||
"disk": wg.Disk,
|
||||
"guid": wg.GUID,
|
||||
"labels": wg.Labels,
|
||||
"name": wg.Name,
|
||||
"num": wg.Num,
|
||||
"ram": wg.RAM,
|
||||
"taints": wg.Taints,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenItemsWg(d *schema.ResourceData, wgList K8SGroupList, computes map[uint64][]kvmvm.ComputeGetResp) {
|
||||
d.Set("items", flattenWgList(wgList, computes))
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -48,6 +49,8 @@ type K8sNodeRecord struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"detailedInfo"`
|
||||
SepID int `json:"SepId"`
|
||||
SepPool string `json:"SepPool"`
|
||||
}
|
||||
|
||||
//K8sRecord represents k8s instance
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -55,10 +56,12 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
|
||||
node := nodeList[0].(map[string]interface{})
|
||||
|
||||
return K8sNodeRecord{
|
||||
Num: node["num"].(int),
|
||||
Cpu: node["cpu"].(int),
|
||||
Ram: node["ram"].(int),
|
||||
Disk: node["disk"].(int),
|
||||
Num: node["num"].(int),
|
||||
Cpu: node["cpu"].(int),
|
||||
Ram: node["ram"].(int),
|
||||
Disk: node["disk"].(int),
|
||||
SepID: node["sep_id"].(int),
|
||||
SepPool: node["sep_pool"].(string),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,6 +114,14 @@ func mastersSchemaMake() map[string]*schema.Schema {
|
||||
Required: true,
|
||||
Description: "Number of nodes to create.",
|
||||
}
|
||||
masters["sep_id"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
}
|
||||
masters["sep_pool"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
}
|
||||
masters["cpu"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
@@ -139,6 +150,14 @@ func workersSchemaMake() map[string]*schema.Schema {
|
||||
Required: true,
|
||||
Description: "Number of nodes to create.",
|
||||
}
|
||||
workers["sep_id"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
}
|
||||
workers["sep_pool"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
}
|
||||
workers["cpu"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -68,6 +69,8 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
|
||||
urlValues.Add("masterCpu", strconv.Itoa(masterNode.Cpu))
|
||||
urlValues.Add("masterRam", strconv.Itoa(masterNode.Ram))
|
||||
urlValues.Add("masterDisk", strconv.Itoa(masterNode.Disk))
|
||||
urlValues.Add("masterSepId", strconv.Itoa(masterNode.SepID))
|
||||
urlValues.Add("masterSepPool", masterNode.SepPool)
|
||||
|
||||
var workerNode K8sNodeRecord
|
||||
if workers, ok := d.GetOk("workers"); ok {
|
||||
@@ -79,6 +82,29 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
|
||||
urlValues.Add("workerCpu", strconv.Itoa(workerNode.Cpu))
|
||||
urlValues.Add("workerRam", strconv.Itoa(workerNode.Ram))
|
||||
urlValues.Add("workerDisk", strconv.Itoa(workerNode.Disk))
|
||||
urlValues.Add("workerSepId", strconv.Itoa(workerNode.SepID))
|
||||
urlValues.Add("workerSepPool", workerNode.SepPool)
|
||||
|
||||
if labels, ok := d.GetOk("labels"); ok {
|
||||
labels := labels.([]interface{})
|
||||
for _, label := range labels {
|
||||
urlValues.Add("labels", label.(string))
|
||||
}
|
||||
}
|
||||
|
||||
if taints, ok := d.GetOk("taints"); ok {
|
||||
taints := taints.([]interface{})
|
||||
for _, taint := range taints {
|
||||
urlValues.Add("taints", taint.(string))
|
||||
}
|
||||
}
|
||||
|
||||
if annotations, ok := d.GetOk("annotations"); ok {
|
||||
annotations := annotations.([]interface{})
|
||||
for _, annotation := range annotations {
|
||||
urlValues.Add("annotations", annotation.(string))
|
||||
}
|
||||
}
|
||||
|
||||
if withLB, ok := d.GetOk("with_lb"); ok {
|
||||
urlValues.Add("withLB", strconv.FormatBool(withLB.(bool)))
|
||||
@@ -132,7 +158,6 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
|
||||
}
|
||||
|
||||
func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
//log.Debugf("resourceK8sRead: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
|
||||
|
||||
k8s, err := utilityDataK8sCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
@@ -281,28 +306,45 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
Required: true,
|
||||
Description: "Name of the cluster.",
|
||||
},
|
||||
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Description: "Resource group ID that this instance belongs to.",
|
||||
},
|
||||
|
||||
"k8sci_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Description: "ID of the k8s catalog item to base this instance on.",
|
||||
},
|
||||
|
||||
"wg_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Description: "Name for first worker group created with cluster.",
|
||||
},
|
||||
|
||||
"labels": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"taints": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"annotations": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"masters": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
@@ -314,7 +356,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
Description: "Master node(s) configuration.",
|
||||
},
|
||||
|
||||
"workers": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
@@ -325,7 +366,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
Description: "Worker node(s) configuration.",
|
||||
},
|
||||
|
||||
"with_lb": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
@@ -333,7 +373,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
Default: true,
|
||||
Description: "Create k8s with load balancer if true.",
|
||||
},
|
||||
|
||||
"extnet_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
@@ -341,7 +380,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
ForceNew: true,
|
||||
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
|
||||
},
|
||||
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
@@ -416,13 +454,11 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"default_wg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "ID of default workers group for this instace.",
|
||||
},
|
||||
|
||||
"kubeconfig": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -33,23 +33,55 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
|
||||
package kvmvm
|
||||
|
||||
const (
|
||||
KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create"
|
||||
KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create"
|
||||
ComputeGetAPI = "/restmachine/cloudapi/compute/get"
|
||||
RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
|
||||
ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach"
|
||||
ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach"
|
||||
ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach"
|
||||
ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach"
|
||||
ComputeStartAPI = "/restmachine/cloudapi/compute/start"
|
||||
ComputeStopAPI = "/restmachine/cloudapi/compute/stop"
|
||||
ComputeResizeAPI = "/restmachine/cloudapi/compute/resize"
|
||||
DisksResizeAPI = "/restmachine/cloudapi/disks/resize2"
|
||||
ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
|
||||
ComputeUpdateAPI = "/restmachine/cloudapi/compute/update"
|
||||
ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd"
|
||||
ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel"
|
||||
ComputeRestoreAPI = "/restmachine/cloudapi/compute/restore"
|
||||
ComputeEnableAPI = "/restmachine/cloudapi/compute/enable"
|
||||
ComputeDisableAPI = "/restmachine/cloudapi/compute/disable"
|
||||
KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create"
|
||||
KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create"
|
||||
ComputeGetAPI = "/restmachine/cloudapi/compute/get"
|
||||
RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
|
||||
ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach"
|
||||
ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach"
|
||||
ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach"
|
||||
ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach"
|
||||
ComputeStartAPI = "/restmachine/cloudapi/compute/start"
|
||||
ComputeStopAPI = "/restmachine/cloudapi/compute/stop"
|
||||
ComputeResizeAPI = "/restmachine/cloudapi/compute/resize"
|
||||
DisksResizeAPI = "/restmachine/cloudapi/disks/resize2"
|
||||
ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
|
||||
ComputeUpdateAPI = "/restmachine/cloudapi/compute/update"
|
||||
ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd"
|
||||
ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel"
|
||||
ComputeRestoreAPI = "/restmachine/cloudapi/compute/restore"
|
||||
ComputeEnableAPI = "/restmachine/cloudapi/compute/enable"
|
||||
ComputeDisableAPI = "/restmachine/cloudapi/compute/disable"
|
||||
ComputeAffinityLabelSetAPI = "/restmachine/cloudapi/compute/affinityLabelSet"
|
||||
ComputeAffinityLabelRemoveAPI = "/restmachine/cloudapi/compute/affinityLabelRemove"
|
||||
ComputeAffinityRuleAddAPI = "/restmachine/cloudapi/compute/affinityRuleAdd"
|
||||
ComputeAffinityRuleRemoveAPI = "/restmachine/cloudapi/compute/affinityRuleRemove"
|
||||
ComputeAffinityRulesClearAPI = "/restmachine/cloudapi/compute/affinityRulesClear"
|
||||
ComputeAntiAffinityRuleAddAPI = "/restmachine/cloudapi/compute/antiAffinityRuleAdd"
|
||||
ComputeAntiAffinityRuleRemoveAPI = "/restmachine/cloudapi/compute/antiAffinityRuleRemove"
|
||||
ComputeAntiAffinityRulesClearAPI = "/restmachine/cloudapi/compute/antiAffinityRulesClear"
|
||||
ComputeListAPI = "/restmachine/cloudapi/compute/list"
|
||||
ComputeAuditsAPI = "/restmachine/cloudapi/compute/audits"
|
||||
ComputeGetAuditsAPI = "/restmachine/cloudapi/compute/getAudits"
|
||||
ComputeGetConsoleUrlAPI = "/restmachine/cloudapi/compute/getConsoleUrl"
|
||||
ComputeGetLogAPI = "/restmachine/cloudapi/compute/getLog"
|
||||
ComputePfwListAPI = "/restmachine/cloudapi/compute/pfwList"
|
||||
ComputeUserListAPI = "/restmachine/cloudapi/compute/userList"
|
||||
ComputeTagAddAPI = "/restmachine/cloudapi/compute/tagAdd"
|
||||
ComputeTagRemoveAPI = "/restmachine/cloudapi/compute/tagRemove"
|
||||
ComputePinToStackAPI = "/restmachine/cloudapi/compute/pinToStack"
|
||||
ComputeUnpinFromStackAPI = "/restmachine/cloudapi/compute/unpinFromStack"
|
||||
ComputePfwAddAPI = "/restmachine/cloudapi/compute/pfwAdd"
|
||||
ComputePfwDelAPI = "/restmachine/cloudapi/compute/pfwDel"
|
||||
ComputeUserGrantAPI = "/restmachine/cloudapi/compute/userGrant"
|
||||
ComputeUserRevokeAPI = "/restmachine/cloudapi/compute/userRevoke"
|
||||
ComputeSnapshotCreateAPI = "/restmachine/cloudapi/compute/snapshotCreate"
|
||||
ComputeSnapshotDeleteAPI = "/restmachine/cloudapi/compute/snapshotDelete"
|
||||
ComputeSnapshotRollbackAPI = "/restmachine/cloudapi/compute/snapshotRollback"
|
||||
ComputePauseAPI = "/restmachine/cloudapi/compute/pause"
|
||||
ComputeResumeAPI = "/restmachine/cloudapi/compute/resume"
|
||||
ComputeCdInsertAPI = "/restmachine/cloudapi/compute/cdInsert"
|
||||
ComputeCdEjectAPI = "/restmachine/cloudapi/compute/cdEject"
|
||||
ComputeResetAPI = "/restmachine/cloudapi/compute/reset"
|
||||
ComputeRedeployAPI = "/restmachine/cloudapi/compute/redeploy"
|
||||
)
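The constants above only name the REST endpoints; the actual requests are issued through the shared controller configuration. The sketch below is illustrative only and assumes controller.ControllerCfg exposes a DecortAPICall(ctx, method, api, urlValues) helper, as the resource code in this provider appears to use; the helper name and signature are taken on trust rather than from this diff.

package kvmvm

import (
	"context"
	"net/url"
	"strconv"

	"github.com/rudecs/terraform-provider-decort/internal/controller"
)

// startComputeExample is an illustrative sketch, not part of the diff: it shows how
// one of the endpoint constants above is typically combined with url.Values and the
// shared controller to issue a request.
func startComputeExample(ctx context.Context, c *controller.ControllerCfg, computeID uint64) error {
	urlValues := &url.Values{}
	urlValues.Add("computeId", strconv.FormatUint(computeID, 10))
	// ComputeStartAPI is defined in the const block above.
	_, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, urlValues)
	return err
}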
|
||||
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,74 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeAudits, err := utilityComputeAuditsCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenComputeAudits(computeAudits))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputeAuditsSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"call": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"responsetime": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"statuscode": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"timestamp": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"user": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DataSourceComputeAudits() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeAuditsRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeAuditsSchemaMake(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,62 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeGetAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeAudits, err := utilityComputeGetAuditsCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenComputeGetAudits(computeAudits))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputeGetAuditsSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"epoch": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"message": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DataSourceComputeGetAudits() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeGetAuditsRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeGetAuditsSchemaMake(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
result := strings.ReplaceAll(string(computeConsoleUrl), "\"", "")
|
||||
result = strings.ReplaceAll(result, "\\", "")
|
||||
d.Set("console_url", result)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputeGetConsoleUrlSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"console_url": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DataSourceComputeGetConsoleUrl() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeGetConsoleUrlRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeGetConsoleUrlSchemaMake(),
|
||||
}
|
||||
}
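The ReplaceAll calls in dataSourceComputeGetConsoleUrlRead strip the surrounding quotes and escape characters from the raw API payload, which appears to be a JSON-encoded string. A hypothetical alternative using strconv.Unquote is sketched below for comparison; the data source above keeps the simpler ReplaceAll approach.

package kvmvm

import "strconv"

// cleanConsoleURL is a hypothetical helper, not part of the diff: if the API returns
// the console URL as a JSON-quoted string, strconv.Unquote removes the quotes and
// unescapes it in one step.
func cleanConsoleURL(raw []byte) string {
	if unquoted, err := strconv.Unquote(string(raw)); err == nil {
		return unquoted
	}
	// fall back to the raw payload if it was not a quoted string
	return string(raw)
}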
|
||||
@@ -0,0 +1,53 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeGetLogRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeGetLog, err := utilityComputeGetLogCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("log", computeGetLog)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputeGetLogSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"path": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"log": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DataSourceComputeGetLog() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeGetLogRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeGetLogSchemaMake(),
|
||||
}
|
||||
}
|
||||
335 internal/service/cloudapi/kvmvm/data_source_compute_list.go (new file)
@@ -0,0 +1,335 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeList, err := utilityDataComputeListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenComputeList(computeList))
|
||||
return nil
|
||||
}
|
||||
|
||||
func computeDisksSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"disk_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"pci_slot": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
func itemComputeSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeListACLSchemaMake(),
|
||||
},
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_label": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeListRulesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"affinity_weight": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"anti_affinity_rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeListRulesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"arch": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"boot_order": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"bootdisk_size": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"clone_reference": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"clones": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"computeci_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"cpus": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"custom_fields": { //NEED
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"devices": { //NEED
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"disks": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeDisksSchemaMake(),
|
||||
},
|
||||
},
|
||||
"driver": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"interfaces": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeInterfacesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"lock_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"manager_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"manager_type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"migrationjob": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"pinned": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"ram": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"reference_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"registered": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"res_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"snap_sets": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: computeSnapSetsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"stateless_sep_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"stateless_sep_type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"tags": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"key": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"val": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"tech_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"total_disk_size": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"user_managed": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"vgpus": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"vins_connected": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"virtual_image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"includedeleted": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
},
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: itemComputeSchemaMake(),
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceComputeList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeListSchemaMake(),
|
||||
}
|
||||
}
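The page, size and includedeleted arguments declared above are read-time filters; the utility function that builds the request is not shown in this diff. A minimal sketch of how such arguments are usually converted into url.Values for ComputeListAPI follows; the helper name is hypothetical and the real utilityDataComputeListCheckPresence may differ.

package kvmvm

import (
	"net/url"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// buildComputeListParams is a hypothetical helper illustrating how the optional
// arguments of the compute list data source map onto request parameters.
func buildComputeListParams(d *schema.ResourceData) *url.Values {
	urlValues := &url.Values{}
	if page, ok := d.GetOk("page"); ok {
		urlValues.Add("page", strconv.Itoa(page.(int)))
	}
	if size, ok := d.GetOk("size"); ok {
		urlValues.Add("size", strconv.Itoa(size.(int)))
	}
	if d.Get("includedeleted").(bool) {
		urlValues.Add("includedeleted", "true")
	}
	return urlValues
}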
|
||||
@@ -0,0 +1,81 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputePfwListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computePfwList, err := utilityComputePfwListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenPfwList(computePfwList))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputePfwListSchemaMake() map[string]*schema.Schema {
|
||||
return map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"pfw_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"local_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"local_port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"public_port_end": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"public_port_start": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vm_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func DataSourceComputePfwList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputePfwListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputePfwListSchemaMake(),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computeUserList, err := utilityComputeUserListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
flattenUserList(d, computeUserList)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceComputeUserListSchemaMake() map[string]*schema.Schema {
|
||||
res := computeACLSchemaMake()
|
||||
res["compute_id"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceComputeUserList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceComputeUserListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceComputeUserListSchemaMake(),
|
||||
}
|
||||
}
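Each DataSource* constructor in this package only becomes usable once it is registered in the provider's DataSourcesMap; that registration lives in the provider package and is not part of this diff. The sketch below shows the usual wiring, and the decort_* map keys are assumptions made purely for illustration.

package provider

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/rudecs/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)

// kvmvmDataSources is an illustrative sketch of how the constructors defined in the
// kvmvm package are typically exposed through the provider; the map keys are hypothetical.
func kvmvmDataSources() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"decort_kvmvm_audits":          kvmvm.DataSourceComputeAudits(),
		"decort_kvmvm_get_audits":      kvmvm.DataSourceComputeGetAudits(),
		"decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(),
		"decort_kvmvm_get_log":         kvmvm.DataSourceComputeGetLog(),
		"decort_kvmvm_list":            kvmvm.DataSourceComputeList(),
		"decort_kvmvm_pfw_list":        kvmvm.DataSourceComputePfwList(),
		"decort_kvmvm_user_list":       kvmvm.DataSourceComputeUserList(),
	}
}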
|
||||
630 internal/service/cloudapi/kvmvm/flattens.go (new file)
@@ -0,0 +1,630 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/status"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func flattenDisks(disks []InfoDisk) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, disk := range disks {
|
||||
temp := map[string]interface{}{
|
||||
"disk_id": disk.ID,
|
||||
"pci_slot": disk.PCISlot,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenQOS(qos QOS) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"e_rate": qos.ERate,
|
||||
"guid": qos.GUID,
|
||||
"in_brust": qos.InBurst,
|
||||
"in_rate": qos.InRate,
|
||||
}
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
func flattenInterfaces(interfaces ListInterfaces) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, interfaceItem := range interfaces {
|
||||
temp := map[string]interface{}{
|
||||
"conn_id": interfaceItem.ConnID,
|
||||
"conn_type": interfaceItem.ConnType,
|
||||
"def_gw": interfaceItem.DefGW,
|
||||
"flip_group_id": interfaceItem.FLIPGroupID,
|
||||
"guid": interfaceItem.GUID,
|
||||
"ip_address": interfaceItem.IPAddress,
|
||||
"listen_ssh": interfaceItem.ListenSSH,
|
||||
"mac": interfaceItem.MAC,
|
||||
"name": interfaceItem.Name,
|
||||
"net_id": interfaceItem.NetID,
|
||||
"netmask": interfaceItem.NetMask,
|
||||
"net_type": interfaceItem.NetType,
|
||||
"pci_slot": interfaceItem.PCISlot,
|
||||
"qos": flattenQOS(interfaceItem.QOS),
|
||||
"target": interfaceItem.Target,
|
||||
"type": interfaceItem.Type,
|
||||
"vnfs": interfaceItem.VNFs,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenSnapSets(snapSets ListSnapSets) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, snapSet := range snapSets {
|
||||
temp := map[string]interface{}{
|
||||
"disks": snapSet.Disks,
|
||||
"guid": snapSet.GUID,
|
||||
"label": snapSet.Label,
|
||||
"timestamp": snapSet.Timestamp,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenTags(tags map[string]string) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for key, val := range tags {
|
||||
temp := map[string]interface{}{
|
||||
"key": key,
|
||||
"val": val,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
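flattenTags iterates a Go map, so the resulting list order is nondeterministic between runs; whether that matters depends on how the tags attribute is declared in the schema. A sorted variant is sketched below purely for illustration and is not part of the diff.

package kvmvm

import "sort"

// flattenTagsSorted is a hypothetical, order-stable variant of flattenTags: it emits
// the tag entries sorted by key so repeated reads produce the same list.
func flattenTagsSorted(tags map[string]string) []map[string]interface{} {
	keys := make([]string, 0, len(tags))
	for key := range tags {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	res := make([]map[string]interface{}, 0, len(tags))
	for _, key := range keys {
		res = append(res, map[string]interface{}{
			"key": key,
			"val": tags[key],
		})
	}
	return res
}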
|
||||
|
||||
func flattenListRules(listRules ListRules) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rule := range listRules {
|
||||
temp := map[string]interface{}{
|
||||
"guid": rule.GUID,
|
||||
"key": rule.Key,
|
||||
"mode": rule.Mode,
|
||||
"policy": rule.Policy,
|
||||
"topology": rule.Topology,
|
||||
"value": rule.Value,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenListACL(listAcl ListACL) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, acl := range listAcl {
|
||||
var explicit interface{}
|
||||
switch acl.Explicit.(type) { // platform quirk: this field may arrive as bool or as string
|
||||
case bool:
|
||||
explicit = acl.Explicit.(bool)
|
||||
case string:
|
||||
explicit, _ = strconv.ParseBool(acl.Explicit.(string))
|
||||
}
|
||||
temp := map[string]interface{}{
|
||||
"explicit": explicit,
|
||||
"guid": acl.GUID,
|
||||
"right": acl.Right,
|
||||
"status": acl.Status,
|
||||
"type": acl.Type,
|
||||
"user_group_id": acl.UserGroupID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenComputeList(computes ListComputes) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, compute := range computes {
|
||||
customFields, _ := json.Marshal(compute.CustomFields)
|
||||
devices, _ := json.Marshal(compute.Devices)
|
||||
temp := map[string]interface{}{
|
||||
"acl": flattenListACL(compute.ACL),
|
||||
"account_id": compute.AccountID,
|
||||
"account_name": compute.AccountName,
|
||||
"affinity_label": compute.AffinityLabel,
|
||||
"affinity_rules": flattenListRules(compute.AffinityRules),
|
||||
"affinity_weight": compute.AffinityWeight,
|
||||
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
|
||||
"arch": compute.Architecture,
|
||||
"boot_order": compute.BootOrder,
|
||||
"bootdisk_size": compute.BootDiskSize,
|
||||
"clone_reference": compute.CloneReference,
|
||||
"clones": compute.Clones,
|
||||
"computeci_id": compute.ComputeCIID,
|
||||
"cpus": compute.CPU,
|
||||
"created_by": compute.CreatedBy,
|
||||
"created_time": compute.CreatedTime,
|
||||
"custom_fields": string(customFields),
|
||||
"deleted_by": compute.DeletedBy,
|
||||
"deleted_time": compute.DeletedTime,
|
||||
"desc": compute.Description,
|
||||
"devices": string(devices),
|
||||
"disks": flattenDisks(compute.Disks),
|
||||
"driver": compute.Driver,
|
||||
"gid": compute.GID,
|
||||
"guid": compute.GUID,
|
||||
"compute_id": compute.ID,
|
||||
"image_id": compute.ImageID,
|
||||
"interfaces": flattenInterfaces(compute.Interfaces),
|
||||
"lock_status": compute.LockStatus,
|
||||
"manager_id": compute.ManagerID,
|
||||
"manager_type": compute.ManagerType,
|
||||
"migrationjob": compute.MigrationJob,
|
||||
"milestones": compute.Milestones,
|
||||
"name": compute.Name,
|
||||
"pinned": compute.Pinned,
|
||||
"ram": compute.RAM,
|
||||
"reference_id": compute.ReferenceID,
|
||||
"registered": compute.Registered,
|
||||
"res_name": compute.ResName,
|
||||
"rg_id": compute.RGID,
|
||||
"rg_name": compute.RGName,
|
||||
"snap_sets": flattenSnapSets(compute.SnapSets),
|
||||
"stateless_sep_id": compute.StatelessSepID,
|
||||
"stateless_sep_type": compute.StatelessSepType,
|
||||
"status": compute.Status,
|
||||
"tags": flattenTags(compute.Tags),
|
||||
"tech_status": compute.TechStatus,
|
||||
"total_disk_size": compute.TotalDiskSize,
|
||||
"updated_by": compute.UpdatedBy,
|
||||
"updated_time": compute.UpdatedTime,
|
||||
"user_managed": compute.UserManaged,
|
||||
"vgpus": compute.VGPUs,
|
||||
"vins_connected": compute.VINSConnected,
|
||||
"virtual_image_id": compute.VirtualImageID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenComputeDisksDemo(disksList ListComputeDisks, extraDisks []interface{}) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(disksList))
|
||||
for _, disk := range disksList {
|
||||
if disk.Name == "bootdisk" || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
|
||||
continue
|
||||
}
|
||||
temp := map[string]interface{}{
|
||||
"disk_name": disk.Name,
|
||||
"disk_id": disk.ID,
|
||||
"disk_type": disk.Type,
|
||||
"sep_id": disk.SepID,
|
||||
"shareable": disk.Shareable,
|
||||
"size_max": disk.SizeMax,
|
||||
"size_used": disk.SizeUsed,
|
||||
"pool": disk.Pool,
|
||||
"desc": disk.Description,
|
||||
"image_id": disk.ImageID,
|
||||
"size": disk.SizeMax,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
sort.Slice(res, func(i, j int) bool {
|
||||
return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64)
|
||||
})
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenNetwork(interfaces ListInterfaces) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(interfaces))
|
||||
//index := 0
|
||||
for _, network := range interfaces {
|
||||
temp := map[string]interface{}{
|
||||
"net_id": network.NetID,
|
||||
"net_type": network.NetType,
|
||||
"ip_address": network.IPAddress,
|
||||
"mac": network.MAC,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func findBootDisk(disks ListComputeDisks) *ItemComputeDisk {
|
||||
for _, disk := range disks {
|
||||
if disk.Name == "bootdisk" {
|
||||
return &disk
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenCompute(d *schema.ResourceData, compute RecordCompute) error {
|
||||
	// flattenCompute maps the detailed compute/get response (RecordCompute) onto the resource schema.
	//
	// NOTE: this function modifies the ResourceData argument - as such it should never be called
	// from the resourceComputeExists(...) method
|
||||
log.Debugf("flattenCompute: ID %d, RG ID %d", compute.ID, compute.RGID)
|
||||
|
||||
devices, _ := json.Marshal(compute.Devices)
|
||||
userdata, _ := json.Marshal(compute.Userdata)
|
||||
|
||||
//check extraDisks, ipa_type, is,
|
||||
d.SetId(strconv.FormatUint(compute.ID, 10))
|
||||
d.Set("acl", flattenACL(compute.ACL))
|
||||
d.Set("account_id", compute.AccountID)
|
||||
d.Set("account_name", compute.AccountName)
|
||||
d.Set("affinity_weight", compute.AffinityWeight)
|
||||
d.Set("arch", compute.Architecture)
|
||||
d.Set("boot_order", compute.BootOrder)
|
||||
d.Set("boot_disk_size", compute.BootDiskSize)
|
||||
	bootDisk := findBootDisk(compute.Disks)
	if bootDisk != nil { // guard against a compute without a "bootdisk" to avoid a nil dereference
		d.Set("boot_disk_id", bootDisk.ID)
		d.Set("sep_id", bootDisk.SepID)
		d.Set("pool", bootDisk.Pool)
	}
|
||||
d.Set("clone_reference", compute.CloneReference)
|
||||
d.Set("clones", compute.Clones)
|
||||
if string(userdata) != "{}" {
|
||||
d.Set("cloud_init", string(userdata))
|
||||
}
|
||||
d.Set("computeci_id", compute.ComputeCIID)
|
||||
d.Set("created_by", compute.CreatedBy)
|
||||
d.Set("created_time", compute.CreatedTime)
|
||||
d.Set("custom_fields", flattenCustomFields(compute.CustomFields))
|
||||
d.Set("deleted_by", compute.DeletedBy)
|
||||
d.Set("deleted_time", compute.DeletedTime)
|
||||
d.Set("description", compute.Description)
|
||||
d.Set("devices", string(devices))
|
||||
err := d.Set("disks", flattenComputeDisksDemo(compute.Disks, d.Get("extra_disks").(*schema.Set).List()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Set("driver", compute.Driver)
|
||||
d.Set("cpu", compute.CPU)
|
||||
d.Set("gid", compute.GID)
|
||||
d.Set("guid", compute.GUID)
|
||||
d.Set("compute_id", compute.ID)
|
||||
if compute.VirtualImageID != 0 {
|
||||
d.Set("image_id", compute.VirtualImageID)
|
||||
} else {
|
||||
d.Set("image_id", compute.ImageID)
|
||||
}
|
||||
d.Set("interfaces", flattenInterfaces(compute.Interfaces))
|
||||
d.Set("lock_status", compute.LockStatus)
|
||||
d.Set("manager_id", compute.ManagerID)
|
||||
d.Set("manager_type", compute.ManagerType)
|
||||
d.Set("migrationjob", compute.MigrationJob)
|
||||
d.Set("milestones", compute.Milestones)
|
||||
d.Set("name", compute.Name)
|
||||
d.Set("natable_vins_id", compute.NatableVINSID)
|
||||
d.Set("natable_vins_ip", compute.NatableVINSIP)
|
||||
d.Set("natable_vins_name", compute.NatableVINSName)
|
||||
d.Set("natable_vins_network", compute.NatableVINSNetwork)
|
||||
d.Set("natable_vins_network_name", compute.NatableVINSNetworkName)
|
||||
if err := d.Set("os_users", parseOsUsers(compute.OSUsers)); err != nil {
|
||||
return err
|
||||
}
|
||||
d.Set("pinned", compute.Pinned)
|
||||
d.Set("ram", compute.RAM)
|
||||
d.Set("reference_id", compute.ReferenceID)
|
||||
d.Set("registered", compute.Registered)
|
||||
d.Set("res_name", compute.ResName)
|
||||
d.Set("rg_id", compute.RGID)
|
||||
d.Set("rg_name", compute.RGName)
|
||||
d.Set("snap_sets", flattenSnapSets(compute.SnapSets))
|
||||
d.Set("stateless_sep_id", compute.StatelessSepID)
|
||||
d.Set("stateless_sep_type", compute.StatelessSepType)
|
||||
d.Set("status", compute.Status)
|
||||
d.Set("tags", flattenTags(compute.Tags))
|
||||
d.Set("tech_status", compute.TechStatus)
|
||||
d.Set("updated_by", compute.UpdatedBy)
|
||||
d.Set("updated_time", compute.UpdatedTime)
|
||||
d.Set("user_managed", compute.UserManaged)
|
||||
d.Set("vgpus", compute.VGPUs)
|
||||
d.Set("virtual_image_id", compute.VirtualImageID)
|
||||
d.Set("virtual_image_name", compute.VirtualImageName)
|
||||
|
||||
d.Set("enabled", false)
|
||||
if compute.Status == status.Enabled {
|
||||
d.Set("enabled", true)
|
||||
}
|
||||
|
||||
d.Set("started", false)
|
||||
if compute.TechStatus == "STARTED" {
|
||||
d.Set("started", true)
|
||||
}
|
||||
|
||||
d.Set("network", flattenNetwork(compute.Interfaces))
|
||||
|
||||
//if len(model.Disks) > 0 {
|
||||
//log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
|
||||
//if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
|
||||
//return err
|
||||
//}
|
||||
//}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenDataComputeDisksDemo(disksList ListComputeDisks, extraDisks []interface{}) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, disk := range disksList {
|
||||
if findInExtraDisks(uint(disk.ID), extraDisks) { // skip disks that are already tracked through extra_disks
|
||||
continue
|
||||
}
|
||||
temp := map[string]interface{}{
|
||||
"disk_name": disk.Name,
|
||||
"disk_id": disk.ID,
|
||||
"disk_type": disk.Type,
|
||||
"sep_id": disk.SepID,
|
||||
"shareable": disk.Shareable,
|
||||
"size_max": disk.SizeMax,
|
||||
"size_used": disk.SizeUsed,
|
||||
"pool": disk.Pool,
|
||||
"desc": disk.Description,
|
||||
"image_id": disk.ImageID,
|
||||
"size": disk.SizeMax,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenACL(acl RecordACL) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"account_acl": flattenListACL(acl.AccountACL),
|
||||
"compute_acl": flattenListACL(acl.ComputeACL),
|
||||
"rg_acl": flattenListACL(acl.RGACL),
|
||||
}
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenAffinityRules(affinityRules ListRules) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, affinityRule := range affinityRules {
|
||||
temp := map[string]interface{}{
|
||||
"guid": affinityRule.GUID,
|
||||
"key": affinityRule.Key,
|
||||
"mode": affinityRule.Mode,
|
||||
"policy": affinityRule.Policy,
|
||||
"topology": affinityRule.Topology,
|
||||
"value": affinityRule.Value,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenIotune(iotune IOTune) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"read_bytes_sec": iotune.ReadBytesSec,
|
||||
"read_bytes_sec_max": iotune.ReadBytesSecMax,
|
||||
"read_iops_sec": iotune.ReadIOPSSec,
|
||||
"read_iops_sec_max": iotune.ReadIOPSSecMax,
|
||||
"size_iops_sec": iotune.SizeIOPSSec,
|
||||
"total_bytes_sec": iotune.TotalBytesSec,
|
||||
"total_bytes_sec_max": iotune.TotalBytesSecMax,
|
||||
"total_iops_sec": iotune.TotalIOPSSec,
|
||||
"total_iops_sec_max": iotune.TotalIOPSSecMax,
|
||||
"write_bytes_sec": iotune.WriteBytesSec,
|
||||
"write_bytes_sec_max": iotune.WriteBytesSecMax,
|
||||
"write_iops_sec": iotune.WriteIOPSSec,
|
||||
"write_iops_sec_max": iotune.WriteIOPSSecMax,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenSnapshots(snapshots SnapshotExtendList) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, snapshot := range snapshots {
|
||||
temp := map[string]interface{}{
|
||||
"guid": snapshot.GUID,
|
||||
"label": snapshot.Label,
|
||||
"res_id": snapshot.ResID,
|
||||
"snap_set_guid": snapshot.SnapSetGUID,
|
||||
"snap_set_time": snapshot.SnapSetTime,
|
||||
"timestamp": snapshot.TimeStamp,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenListComputeDisks(disks ListComputeDisks) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, disk := range disks {
|
||||
acl, _ := json.Marshal(disk.ACL)
|
||||
temp := map[string]interface{}{
|
||||
"_ckey": disk.CKey,
|
||||
"acl": string(acl),
|
||||
"account_id": disk.AccountID,
|
||||
"boot_partition": disk.BootPartition,
|
||||
"created_time": disk.CreatedTime,
|
||||
"deleted_time": disk.DeletedTime,
|
||||
"description": disk.Description,
|
||||
"destruction_time": disk.DestructionTime,
|
||||
"disk_path": disk.DiskPath,
|
||||
"gid": disk.GID,
|
||||
"guid": disk.GUID,
|
||||
"disk_id": disk.ID,
|
||||
"image_id": disk.ImageID,
|
||||
"images": disk.Images,
|
||||
"iotune": flattenIotune(disk.IOTune),
|
||||
"iqn": disk.IQN,
|
||||
"login": disk.Login,
|
||||
"milestones": disk.Milestones,
|
||||
"name": disk.Name,
|
||||
"order": disk.Order,
|
||||
"params": disk.Params,
|
||||
"parent_id": disk.ParentID,
|
||||
"passwd": disk.Passwd,
|
||||
"pci_slot": disk.PCISlot,
|
||||
"pool": disk.Pool,
|
||||
"present_to": disk.PresentTo,
|
||||
"purge_time": disk.PurgeTime,
|
||||
"reality_device_number": disk.RealityDeviceNumber,
|
||||
"res_id": disk.ResID,
|
||||
"role": disk.Role,
|
||||
"sep_id": disk.SepID,
|
||||
"shareable": disk.Shareable,
|
||||
"size_max": disk.SizeMax,
|
||||
"size_used": disk.SizeUsed,
|
||||
"snapshots": flattenSnapshots(disk.Snapshots),
|
||||
"status": disk.Status,
|
||||
"tech_status": disk.TechStatus,
|
||||
"type": disk.Type,
|
||||
"vmid": disk.VMID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenCustomFields(customFields map[string]interface{}) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for key, val := range customFields {
|
||||
value, _ := json.Marshal(val)
|
||||
temp := map[string]interface{}{
|
||||
"key": key,
|
||||
"val": string(value),
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
func flattenOsUsers(osUsers ListOSUser) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, user := range osUsers {
|
||||
temp := map[string]interface{}{
|
||||
"guid": user.GUID,
|
||||
"login": user.Login,
|
||||
"password": user.Password,
|
||||
"public_key": user.PubKey,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenDataCompute(d *schema.ResourceData, compute RecordCompute) {
|
||||
devices, _ := json.Marshal(compute.Devices)
|
||||
userdata, _ := json.Marshal(compute.Userdata)
|
||||
d.Set("acl", flattenACL(compute.ACL))
|
||||
d.Set("account_id", compute.AccountID)
|
||||
d.Set("account_name", compute.AccountName)
|
||||
d.Set("affinity_label", compute.AffinityLabel)
|
||||
d.Set("affinity_rules", flattenAffinityRules(compute.AffinityRules))
|
||||
d.Set("affinity_weight", compute.AffinityWeight)
|
||||
d.Set("anti_affinity_rules", flattenListRules(compute.AntiAffinityRules))
|
||||
d.Set("arch", compute.Architecture)
|
||||
d.Set("boot_order", compute.BootOrder)
|
||||
d.Set("bootdisk_size", compute.BootDiskSize)
|
||||
d.Set("clone_reference", compute.CloneReference)
|
||||
d.Set("clones", compute.Clones)
|
||||
d.Set("computeci_id", compute.ComputeCIID)
|
||||
d.Set("cpus", compute.CPU)
|
||||
d.Set("created_by", compute.CreatedBy)
|
||||
d.Set("created_time", compute.CreatedTime)
|
||||
d.Set("custom_fields", flattenCustomFields(compute.CustomFields))
|
||||
d.Set("deleted_by", compute.DeletedBy)
|
||||
d.Set("deleted_time", compute.DeletedTime)
|
||||
d.Set("desc", compute.Description)
|
||||
d.Set("devices", string(devices))
|
||||
d.Set("disks", flattenListComputeDisks(compute.Disks))
|
||||
d.Set("driver", compute.Driver)
|
||||
d.Set("gid", compute.GID)
|
||||
d.Set("guid", compute.GUID)
|
||||
d.Set("compute_id", compute.ID)
|
||||
d.Set("image_id", compute.ImageID)
|
||||
d.Set("interfaces", flattenInterfaces(compute.Interfaces))
|
||||
d.Set("lock_status", compute.LockStatus)
|
||||
d.Set("manager_id", compute.ManagerID)
|
||||
d.Set("manager_type", compute.ManagerType)
|
||||
d.Set("migrationjob", compute.MigrationJob)
|
||||
d.Set("milestones", compute.Milestones)
|
||||
d.Set("name", compute.Name)
|
||||
d.Set("natable_vins_id", compute.NatableVINSID)
|
||||
d.Set("natable_vins_ip", compute.NatableVINSIP)
|
||||
d.Set("natable_vins_name", compute.NatableVINSName)
|
||||
d.Set("natable_vins_network", compute.NatableVINSNetwork)
|
||||
d.Set("natable_vins_network_name", compute.NatableVINSNetworkName)
|
||||
d.Set("os_users", flattenOsUsers(compute.OSUsers))
|
||||
d.Set("pinned", compute.Pinned)
|
||||
d.Set("ram", compute.RAM)
|
||||
d.Set("reference_id", compute.ReferenceID)
|
||||
d.Set("registered", compute.Registered)
|
||||
d.Set("res_name", compute.ResName)
|
||||
d.Set("rg_id", compute.RGID)
|
||||
d.Set("rg_name", compute.RGName)
|
||||
d.Set("snap_sets", flattenSnapSets(compute.SnapSets))
|
||||
d.Set("stateless_sep_id", compute.StatelessSepID)
|
||||
d.Set("stateless_sep_type", compute.StatelessSepType)
|
||||
d.Set("status", compute.Status)
|
||||
d.Set("tags", compute.Tags)
|
||||
d.Set("tech_status", compute.TechStatus)
|
||||
d.Set("updated_by", compute.UpdatedBy)
|
||||
d.Set("updated_time", compute.UpdatedTime)
|
||||
d.Set("user_managed", compute.UserManaged)
|
||||
d.Set("userdata", string(userdata))
|
||||
d.Set("vgpus", compute.VGPUs)
|
||||
d.Set("virtual_image_id", compute.VirtualImageID)
|
||||
d.Set("virtual_image_name", compute.VirtualImageName)
|
||||
}
|
||||
|
||||
func flattenComputeAudits(computeAudits ListAudits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, computeAudit := range computeAudits {
|
||||
temp := map[string]interface{}{
|
||||
"call": computeAudit.Call,
|
||||
"responsetime": computeAudit.ResponseTime,
|
||||
"statuscode": computeAudit.StatusCode,
|
||||
"timestamp": computeAudit.Timestamp,
|
||||
"user": computeAudit.User,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenPfwList(computePfws ListPFWs) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, computePfw := range computePfws {
|
||||
temp := map[string]interface{}{
|
||||
"pfw_id": computePfw.ID,
|
||||
"local_ip": computePfw.LocalIP,
|
||||
"local_port": computePfw.LocalPort,
|
||||
"protocol": computePfw.Protocol,
|
||||
"public_port_end": computePfw.PublicPortEnd,
|
||||
"public_port_start": computePfw.PublicPortStart,
|
||||
"vm_id": computePfw.VMID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenUserList(d *schema.ResourceData, userList RecordACL) {
|
||||
d.Set("account_acl", flattenListACL(userList.AccountACL))
|
||||
d.Set("compute_acl", flattenListACL(userList.ComputeACL))
|
||||
d.Set("rg_acl", flattenListACL(userList.RGACL))
|
||||
}
|
||||
|
||||
func flattenComputeGetAudits(computeAudits ListShortAudits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, computeAudit := range computeAudits {
|
||||
temp := map[string]interface{}{
|
||||
"epoch": computeAudit.Epoch,
|
||||
"message": computeAudit.Message,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
@@ -72,8 +72,9 @@ type DiskRecord struct {
|
||||
Role string `json:"role"`
|
||||
SepType string `json:"sepType"`
|
||||
SepID int `json:"sepId"` // NOTE: absent from compute/get output
|
||||
Shareable bool `json:"shareable"`
|
||||
SizeMax int `json:"sizeMax"`
|
||||
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
|
||||
SizeUsed float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
|
||||
Snapshots []SnapshotRecord `json:"snapshots"`
|
||||
Status string `json:"status"`
|
||||
TechStatus string `json:"techStatus"`
|
||||
@@ -116,7 +117,7 @@ type SnapshotRecord struct {
|
||||
TimeStamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
type SnapshotRecordList []SnapshotRecord
|
||||
//type SnapshotRecordList []SnapshotRecord
|
||||
|
||||
type ComputeGetResp struct {
|
||||
// ACLs `json:"ACL"` - it is a dictionary, special parsing required
|
||||
@@ -189,3 +190,888 @@ type ComputeBriefRecord struct { // this is a brief compute specification as ret
|
||||
}
|
||||
|
||||
type RgListComputesResp []ComputeBriefRecord
|
||||
|
||||
//#############
|
||||
|
||||
// Access Control List
|
||||
type RecordACL struct {
|
||||
// Account ACL list
|
||||
AccountACL ListACL `json:"accountAcl"`
|
||||
|
||||
// Compute ACL list
|
||||
ComputeACL ListACL `json:"computeAcl"`
|
||||
|
||||
// Resource group ACL list
|
||||
RGACL ListACL `json:"rgAcl"`
|
||||
}
|
||||
|
||||
// ACL information
|
||||
type ItemACL struct {
|
||||
// Explicit
|
||||
Explicit interface{} `json:"explicit"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Right
|
||||
Right string `json:"right"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Type
|
||||
Type string `json:"type"`
|
||||
|
||||
// User group ID
|
||||
UserGroupID string `json:"userGroupId"`
|
||||
}
|
||||
|
||||
// List ACL
|
||||
type ListACL []ItemACL
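
flattenListACL is called above but its body is outside this compare view; a sketch consistent with the ItemACL structure above might look like this (illustrative only, the real helper may differ):

// Hedged sketch of a flattenListACL built over ItemACL; not taken from this diff.
func flattenListACL(acls ListACL) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(acls))
	for _, acl := range acls {
		res = append(res, map[string]interface{}{
			"explicit":      acl.Explicit,
			"guid":          acl.GUID,
			"right":         acl.Right,
			"status":        acl.Status,
			"type":          acl.Type,
			"user_group_id": acl.UserGroupID,
		})
	}
	return res
}
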
|
||||
|
||||
// Main information about usage snapshot
|
||||
type ItemUsageSnapshot struct {
|
||||
// Count
|
||||
Count uint64 `json:"count,omitempty"`
|
||||
|
||||
// Stored
|
||||
Stored float64 `json:"stored"`
|
||||
|
||||
// Label
|
||||
Label string `json:"label,omitempty"`
|
||||
|
||||
// Timestamp
|
||||
Timestamp uint64 `json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
// List of usage snapshot
|
||||
type ListUsageSnapshots []ItemUsageSnapshot
|
||||
|
||||
// Main information about snapshot
|
||||
type ItemSnapshot struct {
|
||||
// List of disk IDs
|
||||
Disks []uint64 `json:"disks"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Label
|
||||
Label string `json:"label"`
|
||||
|
||||
// Timestamp
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// List of snapshots
|
||||
type ListSnapShots []ItemSnapshot
|
||||
|
||||
// Main information about port forward
|
||||
type ItemPFW struct {
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Local IP
|
||||
LocalIP string `json:"localIp"`
|
||||
|
||||
// Local port
|
||||
LocalPort uint64 `json:"localPort"`
|
||||
|
||||
// Protocol
|
||||
Protocol string `json:"protocol"`
|
||||
|
||||
// Public port end
|
||||
PublicPortEnd uint64 `json:"publicPortEnd"`
|
||||
|
||||
// Public port start
|
||||
PublicPortStart uint64 `json:"publicPortStart"`
|
||||
|
||||
// Virtual machine ID
|
||||
VMID uint64 `json:"vmId"`
|
||||
}
|
||||
|
||||
// List port forwards
|
||||
type ListPFWs []ItemPFW
|
||||
|
||||
// Main information about affinity relations
|
||||
type RecordAffinityRelations struct {
|
||||
// Other node
|
||||
OtherNode []interface{} `json:"otherNode"`
|
||||
|
||||
// Other node indirect
|
||||
OtherNodeIndirect []interface{} `json:"otherNodeIndirect"`
|
||||
|
||||
// Other node indirect soft
|
||||
OtherNodeIndirectSoft []interface{} `json:"otherNodeIndirectSoft"`
|
||||
|
||||
// Other node soft
|
||||
OtherNodeSoft []interface{} `json:"otherNodeSoft"`
|
||||
|
||||
// Same node
|
||||
SameNode []interface{} `json:"sameNode"`
|
||||
|
||||
// Same node soft
|
||||
SameNodeSoft []interface{} `json:"sameNodeSoft"`
|
||||
}
|
||||
|
||||
// Main information about attached network
|
||||
type RecordNetAttach struct {
|
||||
// Connection ID
|
||||
ConnID uint64 `json:"connId"`
|
||||
|
||||
// Connection type
|
||||
ConnType string `json:"connType"`
|
||||
|
||||
// Default GW
|
||||
DefGW string `json:"defGw"`
|
||||
|
||||
// FLIPGroup ID
|
||||
FLIPGroupID uint64 `json:"flipgroupId"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// IP address
|
||||
IPAddress string `json:"ipAddress"`
|
||||
|
||||
// Listen SSH
|
||||
ListenSSH bool `json:"listenSsh"`
|
||||
|
||||
// MAC
|
||||
MAC string `json:"mac"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Network ID
|
||||
NetID uint64 `json:"netId"`
|
||||
|
||||
// Network mask
|
||||
NetMask uint64 `json:"netMask"`
|
||||
|
||||
// Network type
|
||||
NetType string `json:"netType"`
|
||||
|
||||
// PCI slot
|
||||
PCISlot uint64 `json:"pciSlot"`
|
||||
|
||||
// QOS
|
||||
QOS QOS `json:"qos"`
|
||||
|
||||
// Target
|
||||
Target string `json:"target"`
|
||||
|
||||
// Type
|
||||
Type string `json:"type"`
|
||||
|
||||
// List VNF IDs
|
||||
VNFs []uint64 `json:"vnfs"`
|
||||
}
|
||||
|
||||
// Detailed information about audit
|
||||
type ItemAudit struct {
|
||||
// Call
|
||||
Call string `json:"call"`
|
||||
|
||||
// Response time
|
||||
ResponseTime float64 `json:"responsetime"`
|
||||
|
||||
// Status code
|
||||
StatusCode uint64 `json:"statuscode"`
|
||||
|
||||
// Timestamp
|
||||
Timestamp float64 `json:"timestamp"`
|
||||
|
||||
// User
|
||||
User string `json:"user"`
|
||||
}
|
||||
|
||||
// List Detailed audits
|
||||
type ListAudits []ItemAudit
|
||||
|
||||
// Short information about audit
|
||||
type ItemShortAudit struct {
|
||||
// Epoch
|
||||
Epoch float64 `json:"epoch"`
|
||||
|
||||
// Message
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// List short audits
|
||||
type ListShortAudits []ItemShortAudit
|
||||
|
||||
// Main information about rule
|
||||
type ItemRule struct {
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Key
|
||||
Key string `json:"key"`
|
||||
|
||||
// Mode
|
||||
Mode string `json:"mode"`
|
||||
|
||||
// Policy
|
||||
Policy string `json:"policy"`
|
||||
|
||||
// Topology
|
||||
Topology string `json:"topology"`
|
||||
|
||||
// Value
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// List rules
|
||||
type ListRules []ItemRule
|
||||
|
||||
// Detailed information about compute
|
||||
type RecordCompute struct {
|
||||
// Access Control List
|
||||
ACL RecordACL `json:"ACL"`
|
||||
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Affinity label
|
||||
AffinityLabel string `json:"affinityLabel"`
|
||||
|
||||
// List affinity rules
|
||||
AffinityRules ListRules `json:"affinityRules"`
|
||||
|
||||
// Affinity weight
|
||||
AffinityWeight uint64 `json:"affinityWeight"`
|
||||
|
||||
// List anti affinity rules
|
||||
AntiAffinityRules ListRules `json:"antiAffinityRules"`
|
||||
|
||||
// Architecture
|
||||
Architecture string `json:"arch"`
|
||||
|
||||
// Boot order
|
||||
BootOrder []string `json:"bootOrder"`
|
||||
|
||||
// Boot disk size
|
||||
BootDiskSize uint64 `json:"bootdiskSize"`
|
||||
|
||||
// Clone reference
|
||||
CloneReference uint64 `json:"cloneReference"`
|
||||
|
||||
// List clone IDs
|
||||
Clones []uint64 `json:"clones"`
|
||||
|
||||
// Compute CI ID
|
||||
ComputeCIID uint64 `json:"computeciId"`
|
||||
|
||||
// Number of cores
|
||||
CPU uint64 `json:"cpus"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Custom fields items
|
||||
CustomFields map[string]interface{} `json:"customFields"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// Devices
|
||||
Devices interface{} `json:"devices"`
|
||||
|
||||
// List disks in compute
|
||||
Disks ListComputeDisks `json:"disks"`
|
||||
|
||||
// Driver
|
||||
Driver string `json:"driver"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Image ID
|
||||
ImageID uint64 `json:"imageId"`
|
||||
|
||||
// Image name
|
||||
ImageName string `json:"imageName"`
|
||||
|
||||
// List interfaces
|
||||
Interfaces ListInterfaces `json:"interfaces"`
|
||||
|
||||
// Lock status
|
||||
LockStatus string `json:"lockStatus"`
|
||||
|
||||
// Manager ID
|
||||
ManagerID uint64 `json:"managerId"`
|
||||
|
||||
// Manager type
|
||||
ManagerType string `json:"managerType"`
|
||||
|
||||
// Migration job
|
||||
MigrationJob uint64 `json:"migrationjob"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Natable VINS ID
|
||||
NatableVINSID uint64 `json:"natableVinsId"`
|
||||
|
||||
// Natable VINS IP
|
||||
NatableVINSIP string `json:"natableVinsIp"`
|
||||
|
||||
// Natable VINS Name
|
||||
NatableVINSName string `json:"natableVinsName"`
|
||||
|
||||
// Natable VINS network
|
||||
NatableVINSNetwork string `json:"natableVinsNetwork"`
|
||||
|
||||
// Natable VINS network name
|
||||
NatableVINSNetworkName string `json:"natableVinsNetworkName"`
|
||||
|
||||
// List OS Users
|
||||
OSUsers ListOSUser `json:"osUsers"`
|
||||
|
||||
// Pinned or not
|
||||
Pinned bool `json:"pinned"`
|
||||
|
||||
// Amount of RAM
|
||||
RAM uint64 `json:"ram"`
|
||||
|
||||
// Reference ID
|
||||
ReferenceID string `json:"referenceId"`
|
||||
|
||||
// Registered or not
|
||||
Registered bool `json:"registered"`
|
||||
|
||||
// Resource name
|
||||
ResName string `json:"resName"`
|
||||
|
||||
// Resource group ID
|
||||
RGID uint64 `json:"rgId"`
|
||||
|
||||
// Resource group name
|
||||
RGName string `json:"rgName"`
|
||||
|
||||
// List snapsets
|
||||
SnapSets ListSnapSets `json:"snapSets"`
|
||||
|
||||
// Stateless SepID
|
||||
StatelessSepID uint64 `json:"statelessSepId"`
|
||||
|
||||
// Stateless SepType
|
||||
StatelessSepType string `json:"statelessSepType"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Tags
|
||||
Tags map[string]string `json:"tags"`
|
||||
|
||||
// Tech status
|
||||
TechStatus string `json:"techStatus"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// User Managed or not
|
||||
UserManaged bool `json:"userManaged"`
|
||||
|
||||
// Userdata
|
||||
Userdata interface{} `json:"userdata"`
|
||||
|
||||
// vGPU IDs
|
||||
VGPUs []uint64 `json:"vgpus"`
|
||||
|
||||
// Virtual image ID
|
||||
VirtualImageID uint64 `json:"virtualImageId"`
|
||||
|
||||
// Virtual image name
|
||||
VirtualImageName string `json:"virtualImageName"`
|
||||
}
|
||||
|
||||
// Main information about OS user
|
||||
type ItemOSUser struct {
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Login
|
||||
Login string `json:"login"`
|
||||
|
||||
// Password
|
||||
Password string `json:"password"`
|
||||
|
||||
// Public key
|
||||
PubKey string `json:"pubkey"`
|
||||
}
|
||||
|
||||
// List OS users
|
||||
type ListOSUser []ItemOSUser
|
||||
|
||||
// Main information about snapsets
|
||||
type ItemSnapSet struct {
|
||||
// List disk IDs
|
||||
Disks []uint64 `json:"disks"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Label
|
||||
Label string `json:"label"`
|
||||
|
||||
// Timestamp
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// List snapsets
|
||||
type ListSnapSets []ItemSnapSet
|
||||
|
||||
// Main information about VNF
|
||||
type ItemVNFInterface struct {
|
||||
// Connection ID
|
||||
ConnID uint64 `json:"connId"`
|
||||
|
||||
// Connection type
|
||||
ConnType string `json:"connType"`
|
||||
|
||||
// Default GW
|
||||
DefGW string `json:"defGw"`
|
||||
|
||||
// FLIPGroup ID
|
||||
FLIPGroupID uint64 `json:"flipgroupId"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// IP address
|
||||
IPAddress string `json:"ipAddress"`
|
||||
|
||||
// Listen SSH or not
|
||||
ListenSSH bool `json:"listenSsh"`
|
||||
|
||||
// MAC
|
||||
MAC string `json:"mac"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Network ID
|
||||
NetID uint64 `json:"netId"`
|
||||
|
||||
// Network mask
|
||||
NetMask uint64 `json:"netMask"`
|
||||
|
||||
// Network type
|
||||
NetType string `json:"netType"`
|
||||
|
||||
// PCI slot
|
||||
PCISlot uint64 `json:"pciSlot"`
|
||||
|
||||
// QOS
|
||||
QOS QOS `json:"qos"`
|
||||
|
||||
// Target
|
||||
Target string `json:"target"`
|
||||
|
||||
// Type
|
||||
Type string `json:"type"`
|
||||
|
||||
// List VNF IDs
|
||||
VNFs []uint64 `json:"vnfs"`
|
||||
}
|
||||
|
||||
type QOS struct {
|
||||
ERate uint64 `json:"eRate"`
|
||||
GUID string `json:"guid"`
|
||||
InBurst uint64 `json:"inBurst"`
|
||||
InRate uint64 `json:"inRate"`
|
||||
}
|
||||
|
||||
// List VNF interfaces
|
||||
type ListInterfaces []ItemVNFInterface
|
||||
|
||||
// List compute disks
|
||||
type ListComputeDisks []ItemComputeDisk
|
||||
|
||||
// Main information about compute disk
|
||||
type ItemComputeDisk struct {
|
||||
// CKey
|
||||
CKey string `json:"_ckey"`
|
||||
|
||||
// Access Control List
|
||||
ACL map[string]interface{} `json:"acl"`
|
||||
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Boot partition
|
||||
BootPartition uint64 `json:"bootPartition"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// Destruction time
|
||||
DestructionTime uint64 `json:"destructionTime"`
|
||||
|
||||
// Disk path
|
||||
DiskPath string `json:"diskPath"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Image ID
|
||||
ImageID uint64 `json:"imageId"`
|
||||
|
||||
// List image IDs
|
||||
Images []uint64 `json:"images"`
|
||||
|
||||
// IO tune
|
||||
IOTune IOTune `json:"iotune"`
|
||||
|
||||
// IQN
|
||||
IQN string `json:"iqn"`
|
||||
|
||||
// Login
|
||||
Login string `json:"login"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Order
|
||||
Order uint64 `json:"order"`
|
||||
|
||||
// Params
|
||||
Params string `json:"params"`
|
||||
|
||||
// Parent ID
|
||||
ParentID uint64 `json:"parentId"`
|
||||
|
||||
// Password
|
||||
Passwd string `json:"passwd"`
|
||||
|
||||
// PCI slot
|
||||
PCISlot uint64 `json:"pciSlot"`
|
||||
|
||||
// Pool
|
||||
Pool string `json:"pool"`
|
||||
|
||||
// Present to
|
||||
PresentTo []uint64 `json:"presentTo"`
|
||||
|
||||
// Purge time
|
||||
PurgeTime uint64 `json:"purgeTime"`
|
||||
|
||||
// Reality device number
|
||||
RealityDeviceNumber uint64 `json:"realityDeviceNumber"`
|
||||
|
||||
// Resource ID
|
||||
ResID string `json:"resId"`
|
||||
|
||||
// Role
|
||||
Role string `json:"role"`
|
||||
|
||||
// SepID
|
||||
SepID uint64 `json:"sepId"`
|
||||
|
||||
// Shareable
|
||||
Shareable bool `json:"shareable"`
|
||||
|
||||
// Size max
|
||||
SizeMax uint64 `json:"sizeMax"`
|
||||
|
||||
// Size used
|
||||
SizeUsed float64 `json:"sizeUsed"`
|
||||
|
||||
// List extend snapshots
|
||||
Snapshots SnapshotExtendList `json:"snapshots"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Tech status
|
||||
TechStatus string `json:"techStatus"`
|
||||
|
||||
// Type
|
||||
Type string `json:"type"`
|
||||
|
||||
// Virtual machine ID
|
||||
VMID uint64 `json:"vmid"`
|
||||
}
|
||||
|
||||
// Main information about snapshot extend
|
||||
type SnapshotExtend struct {
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Label
|
||||
Label string `json:"label"`
|
||||
|
||||
// Resource ID
|
||||
ResID string `json:"resId"`
|
||||
|
||||
// SnapSetGUID
|
||||
SnapSetGUID string `json:"snapSetGuid"`
|
||||
|
||||
// SnapSetTime
|
||||
SnapSetTime uint64 `json:"snapSetTime"`
|
||||
|
||||
// TimeStamp
|
||||
TimeStamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// List Snapshot Extend
|
||||
type SnapshotExtendList []SnapshotExtend
|
||||
|
||||
// Main information about IO tune
|
||||
type IOTune struct {
|
||||
// ReadBytesSec
|
||||
ReadBytesSec uint64 `json:"read_bytes_sec"`
|
||||
|
||||
// ReadBytesSecMax
|
||||
ReadBytesSecMax uint64 `json:"read_bytes_sec_max"`
|
||||
|
||||
// ReadIOPSSec
|
||||
ReadIOPSSec uint64 `json:"read_iops_sec"`
|
||||
|
||||
// ReadIOPSSecMax
|
||||
ReadIOPSSecMax uint64 `json:"read_iops_sec_max"`
|
||||
|
||||
// SizeIOPSSec
|
||||
SizeIOPSSec uint64 `json:"size_iops_sec"`
|
||||
|
||||
// TotalBytesSec
|
||||
TotalBytesSec uint64 `json:"total_bytes_sec"`
|
||||
|
||||
// TotalBytesSecMax
|
||||
TotalBytesSecMax uint64 `json:"total_bytes_sec_max"`
|
||||
|
||||
// TotalIOPSSec
|
||||
TotalIOPSSec uint64 `json:"total_iops_sec"`
|
||||
|
||||
// TotalIOPSSecMax
|
||||
TotalIOPSSecMax uint64 `json:"total_iops_sec_max"`
|
||||
|
||||
// WriteBytesSec
|
||||
WriteBytesSec uint64 `json:"write_bytes_sec"`
|
||||
|
||||
// WriteBytesSecMax
|
||||
WriteBytesSecMax uint64 `json:"write_bytes_sec_max"`
|
||||
|
||||
// WriteIOPSSec
|
||||
WriteIOPSSec uint64 `json:"write_iops_sec"`
|
||||
|
||||
// WriteIOPSSecMax
|
||||
WriteIOPSSecMax uint64 `json:"write_iops_sec_max"`
|
||||
}
|
||||
|
||||
// Main information about compute
|
||||
type ItemCompute struct {
|
||||
// Access Control List
|
||||
ACL ListACL `json:"acl"`
|
||||
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Affinity label
|
||||
AffinityLabel string `json:"affinityLabel"`
|
||||
|
||||
// List affinity rules
|
||||
AffinityRules ListRules `json:"affinityRules"`
|
||||
|
||||
// Affinity weight
|
||||
AffinityWeight uint64 `json:"affinityWeight"`
|
||||
|
||||
// List anti affinity rules
|
||||
AntiAffinityRules ListRules `json:"antiAffinityRules"`
|
||||
|
||||
// Architecture
|
||||
Architecture string `json:"arch"`
|
||||
|
||||
// Boot order
|
||||
BootOrder []string `json:"bootOrder"`
|
||||
|
||||
// Boot disk size
|
||||
BootDiskSize uint64 `json:"bootdiskSize"`
|
||||
|
||||
// Clone reference
|
||||
CloneReference uint64 `json:"cloneReference"`
|
||||
|
||||
// List clone IDs
|
||||
Clones []uint64 `json:"clones"`
|
||||
|
||||
// Compute CI ID
|
||||
ComputeCIID uint64 `json:"computeciId"`
|
||||
|
||||
// Number of cores
|
||||
CPU uint64 `json:"cpus"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Custom fields list
|
||||
CustomFields map[string]interface{} `json:"customFields"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// Devices
|
||||
Devices interface{} `json:"devices"`
|
||||
|
||||
// List disk items
|
||||
Disks []InfoDisk `json:"disks"`
|
||||
|
||||
// Driver
|
||||
Driver string `json:"driver"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Image ID
|
||||
ImageID uint64 `json:"imageId"`
|
||||
|
||||
// List interfaces
|
||||
Interfaces ListInterfaces `json:"interfaces"`
|
||||
|
||||
// Lock status
|
||||
LockStatus string `json:"lockStatus"`
|
||||
|
||||
// Manager ID
|
||||
ManagerID uint64 `json:"managerId"`
|
||||
|
||||
// Manager type
|
||||
ManagerType string `json:"managerType"`
|
||||
|
||||
// Migration job
|
||||
MigrationJob uint64 `json:"migrationjob"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Pinned or not
|
||||
Pinned bool `json:"pinned"`
|
||||
|
||||
// Amount of RAM
|
||||
RAM uint64 `json:"ram"`
|
||||
|
||||
// Reference ID
|
||||
ReferenceID string `json:"referenceId"`
|
||||
|
||||
// Registered
|
||||
Registered bool `json:"registered"`
|
||||
|
||||
// Resource name
|
||||
ResName string `json:"resName"`
|
||||
|
||||
// Resource group ID
|
||||
RGID uint64 `json:"rgId"`
|
||||
|
||||
// Resource group name
|
||||
RGName string `json:"rgName"`
|
||||
|
||||
// List snapsets
|
||||
SnapSets ListSnapSets `json:"snapSets"`
|
||||
|
||||
// Stateless SepID
|
||||
StatelessSepID uint64 `json:"statelessSepId"`
|
||||
|
||||
// Stateless SepType
|
||||
StatelessSepType string `json:"statelessSepType"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Tags
|
||||
Tags map[string]string `json:"tags"`
|
||||
|
||||
// Tech status
|
||||
TechStatus string `json:"techStatus"`
|
||||
|
||||
// Total disk size
|
||||
TotalDiskSize uint64 `json:"totalDisksSize"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// User Managed or not
|
||||
UserManaged bool `json:"userManaged"`
|
||||
|
||||
// List vGPU IDs
|
||||
VGPUs []uint64 `json:"vgpus"`
|
||||
|
||||
// VINS connected
|
||||
VINSConnected uint64 `json:"vinsConnected"`
|
||||
|
||||
// Virtual image ID
|
||||
VirtualImageID uint64 `json:"virtualImageId"`
|
||||
}
|
||||
|
||||
// Information Disk
|
||||
type InfoDisk struct {
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// PCISlot
|
||||
PCISlot uint64 `json:"pciSlot"`
|
||||
}
|
||||
|
||||
// List information about computes
|
||||
type ListComputes []ItemCompute
|
||||
|
||||
@@ -38,13 +38,13 @@ import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func parseOsUsers(logins []OsUserRecord) []interface{} {
|
||||
func parseOsUsers(logins ListOSUser) []interface{} {
|
||||
var result = make([]interface{}, len(logins))
|
||||
|
||||
for index, value := range logins {
|
||||
elem := make(map[string]interface{})
|
||||
|
||||
elem["guid"] = value.Guid
|
||||
elem["guid"] = value.GUID
|
||||
elem["login"] = value.Login
|
||||
elem["password"] = value.Password
|
||||
elem["public_key"] = value.PubKey
|
||||
@@ -70,9 +70,9 @@ func osUsersSubresourceSchemaMake() map[string]*schema.Schema {
|
||||
},
|
||||
|
||||
"password": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Sensitive: true,
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
//Sensitive: true,
|
||||
Description: "Password of this guest OS user.",
|
||||
},
|
||||
|
||||
|
||||
106
internal/service/cloudapi/kvmvm/resource_check_input_values.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func existRgID(ctx context.Context, d *schema.ResourceData, m interface{}) bool {
|
||||
log.Debugf("resourceComputeCreate: check access for RG ID: %v", d.Get("rg_id").(int))
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
rgList := []struct {
|
||||
ID int `json:"id"`
|
||||
}{}
|
||||
|
||||
rgListAPI := "/restmachine/cloudapi/rg/list"
|
||||
urlValues.Add("includedeleted", "false")
|
||||
rgListRaw, err := c.DecortAPICall(ctx, "POST", rgListAPI, urlValues)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
err = json.Unmarshal([]byte(rgListRaw), &rgList)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
rgId := d.Get("rg_id").(int)
|
||||
for _, rg := range rgList {
|
||||
if rg.ID == rgId {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func existImageId(ctx context.Context, d *schema.ResourceData, m interface{}) bool {
|
||||
log.Debugf("resourceComputeCreate: check access for image ID: %v", d.Get("image_id").(int))
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
imageList := []struct {
|
||||
ID int `json:"id"`
|
||||
}{}
|
||||
imageListAPI := "/restmachine/cloudapi/image/list"
|
||||
imageListRaw, err := c.DecortAPICall(ctx, "POST", imageListAPI, urlValues)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
err = json.Unmarshal([]byte(imageListRaw), &imageList)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
imageId := d.Get("image_id").(int)
|
||||
for _, image := range imageList {
|
||||
if image.ID == imageId {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func existVinsIdInList(vinsId int, vinsList []struct {
|
||||
ID int `json:"id"`
|
||||
}) bool {
|
||||
for _, vins := range vinsList {
|
||||
if vinsId == vins.ID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func existVinsId(ctx context.Context, d *schema.ResourceData, m interface{}) (int, bool) {
|
||||
log.Debugf("resourceComputeCreate: check access for vinses IDs")
|
||||
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
vinsListAPI := "/restmachine/cloudapi/vins/list"
|
||||
urlValues.Add("includeDeleted", "false")
|
||||
vinsList := []struct {
|
||||
ID int `json:"id"`
|
||||
}{}
|
||||
vinsListRaw, err := c.DecortAPICall(ctx, "POST", vinsListAPI, urlValues)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
err = json.Unmarshal([]byte(vinsListRaw), &vinsList)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
networks := d.Get("network").(*schema.Set).List()
|
||||
|
||||
for _, networkInterface := range networks {
|
||||
|
||||
networkItem := networkInterface.(map[string]interface{})
|
||||
if !existVinsIdInList(networkItem["net_id"].(int), vinsList) {
|
||||
return networkItem["net_id"].(int), false
|
||||
}
|
||||
}
|
||||
return 0, true
|
||||
}
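
Taken together, these checks let the compute resource fail fast on bad input IDs. A hedged sketch of how they could be wired in (the wrapper name is hypothetical and the real call site, in the compute resource file, is not shown in full in this compare view):

// Hedged sketch only: early input validation before resourceComputeCreate proceeds.
func checkComputeCreateInput(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	if !existRgID(ctx, d, m) {
		return diag.Errorf("resourceComputeCreate: rg_id %d does not exist or is not accessible", d.Get("rg_id").(int))
	}
	if !existImageId(ctx, d, m) {
		return diag.Errorf("resourceComputeCreate: image_id %d does not exist or is not accessible", d.Get("image_id").(int))
	}
	if vinsId, ok := existVinsId(ctx, d, m); !ok {
		return diag.Errorf("resourceComputeCreate: vins ID %d from a 'network' block does not exist or is not accessible", vinsId)
	}
	return nil
}
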
|
||||
File diff suppressed because it is too large
@@ -37,7 +37,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -242,90 +241,20 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
|
||||
return nil
|
||||
}
|
||||
|
||||
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
|
||||
// This function tries to locate Compute by one of the following approaches:
|
||||
// - if compute_id is specified - locate by compute ID
|
||||
// - if compute_name is specified - locate by a combination of compute name and resource
|
||||
// group ID
|
||||
//
|
||||
// If succeeded, it returns non-empty string that contains JSON formatted facts about the
|
||||
// Compute as returned by compute/get API call.
|
||||
// Otherwise it returns empty string and meaningful error.
|
||||
//
|
||||
// This function does not modify its ResourceData argument, so it is safe to use it as core
|
||||
// method for resource's Exists method.
|
||||
//
|
||||
|
||||
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordCompute, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
compute := &RecordCompute{}
|
||||
|
||||
// make it possible to use "read" & "check presence" functions with compute ID set so
|
||||
// that Import of Compute resource is possible
|
||||
idSet := false
|
||||
theId, err := strconv.Atoi(d.Id())
|
||||
if err != nil || theId <= 0 {
|
||||
computeId, argSet := d.GetOk("compute_id") // NB: compute_id is NOT present in computeResource schema!
|
||||
if argSet {
|
||||
theId = computeId.(int)
|
||||
idSet = true
|
||||
}
|
||||
} else {
|
||||
idSet = true
|
||||
}
|
||||
|
||||
if idSet {
|
||||
// compute ID is specified, try to get compute instance straight by this ID
|
||||
log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", theId)
|
||||
urlValues.Add("computeId", fmt.Sprintf("%d", theId))
|
||||
computeFacts, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return computeFacts, nil
|
||||
}
|
||||
|
||||
// ID was not set in the schema upon entering this function - work through Compute name
|
||||
// and RG ID
|
||||
computeName, argSet := d.GetOk("name")
|
||||
if !argSet {
|
||||
return "", fmt.Errorf("cannot locate compute instance if name is empty and no compute ID specified")
|
||||
}
|
||||
|
||||
rgId, argSet := d.GetOk("rg_id")
|
||||
if !argSet {
|
||||
return "", fmt.Errorf("cannot locate compute by name %s if no resource group ID is set", computeName.(string))
|
||||
}
|
||||
|
||||
urlValues.Add("rgId", fmt.Sprintf("%d", rgId))
|
||||
apiResp, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues)
|
||||
urlValues.Add("computeId", d.Id())
|
||||
computeRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return *compute, err
|
||||
}
|
||||
|
||||
log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp)
|
||||
|
||||
computeList := RgListComputesResp{}
|
||||
err = json.Unmarshal([]byte(apiResp), &computeList)
|
||||
err = json.Unmarshal([]byte(computeRaw), &compute)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return *compute, err
|
||||
}
|
||||
|
||||
// log.Printf("%#v", computeList)
|
||||
log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(computeList))
|
||||
for index, item := range computeList {
|
||||
// need to match Compute by name, skip Computes with the same name in DESTROYED status
|
||||
if item.Name == computeName.(string) && item.Status != "DESTROYED" {
|
||||
log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name)
|
||||
// we found the Compute we need - now get detailed information via compute/get API
|
||||
cgetValues := &url.Values{}
|
||||
cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))
|
||||
apiResp, err = c.DecortAPICall(ctx, "POST", ComputeGetAPI, cgetValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return apiResp, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil // there should be no error if Compute does not exist
|
||||
return *compute, nil
|
||||
}
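
With the return type switched from a raw JSON string to RecordCompute, callers no longer unmarshal the facts themselves. A sketch of a Read built on the new contract (flattenCompute is assumed to be the d.Set helper at the top of this diff; its exact signature may differ):

// Sketch only: a Read on top of the typed check-presence helper above.
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	compute, err := utilityComputeCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("") // clear the ID if the compute could not be located
		return diag.FromErr(err)
	}
	// flattenCompute is the d.Set(...) helper shown earlier; treat this call as illustrative.
	flattenCompute(d, compute)
	return nil
}
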
|
||||
|
||||
29
internal/service/cloudapi/kvmvm/utility_compute_audits.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputeAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAudits, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
computeAudits := &ListAudits{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computeAuditsRaw, err := c.DecortAPICall(ctx, "POST", ComputeAuditsAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(computeAuditsRaw), &computeAudits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *computeAudits, nil
|
||||
}
|
||||
23
internal/service/cloudapi/kvmvm/utility_compute_boot_disk.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func utilityComputeBootDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ItemComputeDisk, error) {
|
||||
compute, err := utilityComputeCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bootDisk := &ItemComputeDisk{}
|
||||
for _, disk := range compute.Disks {
|
||||
if disk.Name == "bootdisk" {
|
||||
*bootDisk = disk
|
||||
break
|
||||
}
|
||||
}
|
||||
return bootDisk, nil
|
||||
}
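
If no disk named "bootdisk" is attached, the helper returns a zero-valued ItemComputeDisk rather than an error, so callers should guard on the ID. An illustrative wrapper (its name and call site are assumptions):

// Hedged sketch: caller-side guard around the boot disk lookup above.
func bootDiskSizeOrZero(ctx context.Context, d *schema.ResourceData, m interface{}) (uint64, error) {
	bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m)
	if err != nil {
		return 0, err
	}
	if bootDisk.ID == 0 {
		// no disk named "bootdisk" is attached to this compute
		return 0, nil
	}
	return bootDisk.SizeMax, nil
}
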
|
||||
@@ -0,0 +1,29 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputeGetAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListShortAudits, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
computeAudits := &ListShortAudits{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computeAuditsRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAuditsAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(computeAuditsRaw), &computeAudits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *computeAudits, nil
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputeGetConsoleUrlCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computeConsoleUrlRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetConsoleUrlAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(computeConsoleUrlRaw), nil
|
||||
}
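
A matching Read for the console-URL data source can be as small as the sketch below (the function name and the "console_url" attribute are assumptions):

// Hedged sketch: Read for the console-URL data source built on the helper above.
func dataSourceComputeGetConsoleUrlRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	computeConsoleUrl, err := utilityComputeGetConsoleUrlCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	d.SetId(strconv.Itoa(d.Get("compute_id").(int)))
	d.Set("console_url", computeConsoleUrl)
	return nil
}
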
|
||||
24
internal/service/cloudapi/kvmvm/utility_compute_get_log.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputeGetLogCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
urlValues.Add("path", d.Get("path").(string))
|
||||
computeGetLogRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetLogAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(computeGetLogRaw), nil
|
||||
}
|
||||
39
internal/service/cloudapi/kvmvm/utility_compute_list.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListComputes, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
listComputes := &ListComputes{}
|
||||
|
||||
if includeDeleted, ok := d.GetOk("includedeleted"); ok {
|
||||
urlValues.Add("includeDeleted", strconv.FormatBool(includeDeleted.(bool)))
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
urlValues.Add("page", strconv.Itoa(page.(int)))
|
||||
}
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
urlValues.Add("size", strconv.Itoa(size.(int)))
|
||||
}
|
||||
|
||||
listComputesRaw, err := c.DecortAPICall(ctx, "POST", ComputeListAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(listComputesRaw), &listComputes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *listComputes, nil
|
||||
|
||||
}
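
The list returned here still has to be flattened for the data source schema. A partial, illustrative flatten over ItemCompute fields that do appear in this change (the real helper would cover the full schema):

// Hedged, partial sketch of a flatten for ListComputes.
func flattenComputeList(computes ListComputes) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(computes))
	for _, compute := range computes {
		res = append(res, map[string]interface{}{
			"compute_id":  compute.ID,
			"name":        compute.Name,
			"rg_id":       compute.RGID,
			"status":      compute.Status,
			"tech_status": compute.TechStatus,
		})
	}
	return res
}
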
|
||||
30
internal/service/cloudapi/kvmvm/utility_compute_pfw_list.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputePfwListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListPFWs, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
listPFWs := &ListPFWs{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computePfwListRaw, err := c.DecortAPICall(ctx, "POST", ComputePfwListAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(computePfwListRaw), &listPFWs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *listPFWs, err
|
||||
|
||||
}
|
||||
28
internal/service/cloudapi/kvmvm/utility_compute_user_list.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityComputeUserListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordACL, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
userList := &RecordACL{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computeUserListRaw, err := c.DecortAPICall(ctx, "POST", ComputeUserListAPI, urlValues)
|
||||
if err != nil {
|
||||
return *userList, err
|
||||
}
|
||||
err = json.Unmarshal([]byte(computeUserListRaw), &userList)
|
||||
if err != nil {
|
||||
return *userList, err
|
||||
}
|
||||
return *userList, err
|
||||
}
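
This helper pairs naturally with flattenUserList defined earlier in this change; a minimal Read sketch (only the function name is an assumption):

// Hedged sketch: Read for the compute user-list data source.
func dataSourceComputeUserListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	userList, err := utilityComputeUserListCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	d.SetId(strconv.Itoa(d.Get("compute_id").(int)))
	flattenUserList(d, userList)
	return nil
}
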
|
||||
@@ -0,0 +1,29 @@
|
||||
package kvmvm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityDataComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (RecordCompute, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
compute := &RecordCompute{}
|
||||
|
||||
urlValues.Add("computeId", strconv.Itoa(d.Get("compute_id").(int)))
|
||||
computeRaw, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return *compute, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(computeRaw), &compute)
|
||||
if err != nil {
|
||||
return *compute, err
|
||||
}
|
||||
return *compute, nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -31,9 +32,26 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
|
||||
|
||||
package rg
|
||||
|
||||
const ResgroupCreateAPI = "/restmachine/cloudapi/rg/create"
|
||||
const ResgroupUpdateAPI = "/restmachine/cloudapi/rg/update"
|
||||
const ResgroupListAPI = "/restmachine/cloudapi/rg/list"
|
||||
const ResgroupGetAPI = "/restmachine/cloudapi/rg/get"
|
||||
const ResgroupDeleteAPI = "/restmachine/cloudapi/rg/delete"
|
||||
const RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
|
||||
const (
|
||||
ResgroupCreateAPI = "/restmachine/cloudapi/rg/create"
|
||||
ResgroupUpdateAPI = "/restmachine/cloudapi/rg/update"
|
||||
ResgroupListAPI = "/restmachine/cloudapi/rg/list"
|
||||
ResgroupListDeletedAPI = "/restmachine/cloudapi/rg/listDeleted"
|
||||
ResgroupListPfwAPI = "/restmachine/cloudapi/rg/listPFW"
|
||||
ResgroupGetAPI = "/restmachine/cloudapi/rg/get"
|
||||
ResgroupListVinsAPI = "/restmachine/cloudapi/rg/listVins"
|
||||
ResgroupListLbAPI = "/restmachine/cloudapi/rg/listLb"
|
||||
ResgroupDeleteAPI = "/restmachine/cloudapi/rg/delete"
|
||||
RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
|
||||
RgAffinityGroupComputesAPI = "/restmachine/cloudapi/rg/affinityGroupComputes"
|
||||
RgAffinityGroupsGetAPI = "/restmachine/cloudapi/rg/affinityGroupsGet"
|
||||
RgAffinityGroupsListAPI = "/restmachine/cloudapi/rg/affinityGroupsList"
|
||||
RgAuditsAPI = "/restmachine/cloudapi/rg/audits"
|
||||
RgEnableAPI = "/restmachine/cloudapi/rg/enable"
|
||||
RgDisableAPI = "/restmachine/cloudapi/rg/disable"
|
||||
ResgroupUsageAPI = "/restmachine/cloudapi/rg/usage"
|
||||
RgAccessGrantAPI = "/restmachine/cloudapi/rg/accessGrant"
|
||||
RgAccessRevokeAPI = "/restmachine/cloudapi/rg/accessRevoke"
|
||||
RgSetDefNetAPI = "/restmachine/cloudapi/rg/setDefNet"
|
||||
RgRestoreAPI = "/restmachine/cloudapi/rg/restore"
|
||||
)
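
Each endpoint is called through the controller in the same way as the kvmvm utilities above. A hedged sketch for the new enable/disable pair (the "enable" field and the wrapper function are assumptions; only the constants and the DecortAPICall pattern come from this change):

// Hedged sketch: toggling an RG via the new enable/disable endpoints.
func updateRgEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	urlValues := &url.Values{}
	// "rgId" follows the parameter naming used by the other rg calls in this change.
	urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))

	api := RgDisableAPI
	if d.Get("enable").(bool) {
		api = RgEnableAPI
	}
	_, err := c.DecortAPICall(ctx, "POST", api, urlValues)
	return err
}
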
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -33,65 +34,369 @@ package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
// "net/url"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func flattenResgroup(d *schema.ResourceData, rg_facts string) error {
|
||||
// NOTE: this function modifies ResourceData argument - as such it should never be called
|
||||
// from resourceRsgroupExists(...) method
|
||||
// log.Debugf("%s", rg_facts)
|
||||
log.Debugf("flattenResgroup: ready to decode response body from API")
|
||||
details := ResgroupGetResp{}
|
||||
err := json.Unmarshal([]byte(rg_facts), &details)
|
||||
if err != nil {
|
||||
return err
|
||||
func sepsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"sep_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"data_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
|
||||
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
|
||||
details.Name, details.ID, details.AccountID)
|
||||
return res
|
||||
}
|
||||
|
||||
d.SetId(fmt.Sprintf("%d", details.ID))
|
||||
d.Set("rg_id", details.ID)
|
||||
d.Set("name", details.Name)
|
||||
d.Set("account_name", details.AccountName)
|
||||
d.Set("account_id", details.AccountID)
|
||||
// d.Set("grid_id", details.GridID)
|
||||
d.Set("description", details.Desc)
|
||||
d.Set("status", details.Status)
|
||||
d.Set("def_net_type", details.DefaultNetType)
|
||||
d.Set("def_net_id", details.DefaultNetID)
|
||||
/*
|
||||
d.Set("vins", details.Vins)
|
||||
d.Set("computes", details.Computes)
|
||||
*/
|
||||
|
||||
log.Debugf("flattenResgroup: calling flattenQuota()")
|
||||
if err = d.Set("quota", parseQuota(details.Quota)); err != nil {
|
||||
return err
|
||||
func resourcesSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"current": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"extips": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"ram": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"seps": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"sep_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"map": {
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"reserved": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"extips": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"ram": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"seps": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"sep_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"map": {
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return nil
|
||||
return res
|
||||
}
|
||||
|
||||
func aclSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"explicit": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"right": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"user_group_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func resourceLimitsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"cu_c": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceRgSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"reason": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"resources": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: resourcesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"dirty": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"lock_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"register_computes": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"resource_limits": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: resourceLimitsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"secret": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vins": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"computes": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"res_types": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"uniq_pools": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m)
|
||||
if rg_facts == "" {
|
||||
// if empty string is returned from utilityResgroupCheckPresence then there is no
|
||||
// such resource group and err tells so - just return it to the calling party
|
||||
rg, err := utilityDataResgroupCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
d.SetId("") // ensure ID is empty in this case
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
return diag.FromErr(flattenResgroup(d, rg_facts))
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
flattenRg(d, *rg)
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataSourceResgroup() *schema.Resource {
|
||||
@@ -105,92 +410,6 @@ func DataSourceResgroup() *schema.Resource {
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Name of the resource group. Names are case sensitive and unique within the context of an account.",
|
||||
},
|
||||
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Unique ID of the resource group. If this ID is specified, then resource group name is ignored.",
|
||||
},
|
||||
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Name of the account, which this resource group belongs to.",
|
||||
},
|
||||
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "Unique ID of the account, which this resource group belongs to.",
|
||||
},
|
||||
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "User-defined text description of this resource group.",
|
||||
},
|
||||
|
||||
/* commented out, as in this version of provider we use default Grid ID
|
||||
"grid_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Unique ID of the grid, where this resource group is deployed.",
|
||||
},
|
||||
*/
|
||||
|
||||
"quota": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: quotaRgSubresourceSchemaMake(), // this is a dictionary
|
||||
},
|
||||
Description: "Quota settings for this resource group.",
|
||||
},
|
||||
|
||||
"def_net_type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Type of the default network for this resource group.",
|
||||
},
|
||||
|
||||
"def_net_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "ID of the default network for this resource group (if any).",
|
||||
},
|
||||
|
||||
/*
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Current status of this resource group.",
|
||||
},
|
||||
|
||||
"vins": {
|
||||
Type: schema.TypeList, // this is a list of ints
|
||||
Computed: true,
|
||||
MaxItems: LimitMaxVinsPerResgroup,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of VINs deployed in this resource group.",
|
||||
},
|
||||
|
||||
"computes": {
|
||||
Type: schema.TypeList, // this is a list of ints
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of computes deployed in this resource group.",
|
||||
},
|
||||
*/
|
||||
},
|
||||
Schema: dataSourceRgSchemaMake(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,108 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
rgComputes, err := utilityRgAffinityGroupComputesCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("items", flattenRgAffinityGroupComputes(rgComputes))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgAffinityGroupComputesSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
"affinity_group": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "Affinity group label",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"other_node": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"other_node_indirect": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"other_node_indirect_soft": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"other_node_soft": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"same_node": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"same_node_soft": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgAffinityGroupComputes() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgAffinityGroupComputesRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgAffinityGroupComputesSchemaMake(),
|
||||
}
|
||||
}
|
||||
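For context, here is a standalone sketch of the list shape the `items` attribute of this data source ends up holding: one entry per compute in the affinity group, with the neighbour lists carried as plain integer slices. The `AffinityGroupCompute` type below is an illustrative stand-in, not the provider's actual model.

```go
package main

import "fmt"

// Illustrative stand-in for the API record returned behind
// utilityRgAffinityGroupComputesCheckPresence.
type AffinityGroupCompute struct {
	ComputeID uint64
	SameNode  []uint64
	OtherNode []uint64
}

// Mirrors the flattening pattern used by the data source:
// a slice of records becomes []map[string]interface{} for Terraform state.
func flatten(list []AffinityGroupCompute) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(list))
	for _, item := range list {
		res = append(res, map[string]interface{}{
			"compute_id": item.ComputeID,
			"same_node":  item.SameNode,
			"other_node": item.OtherNode,
		})
	}
	return res
}

func main() {
	items := flatten([]AffinityGroupCompute{
		{ComputeID: 101, SameNode: []uint64{102}, OtherNode: []uint64{201, 202}},
	})
	fmt.Println(items)
	// [map[compute_id:101 other_node:[201 202] same_node:[102]]]
}
```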
@@ -0,0 +1,60 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
computes, err := utilityRgAffinityGroupsGetCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("ids", computes)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgAffinityGroupsGetSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
"affinity_group": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "Affinity group label",
|
||||
},
|
||||
|
||||
"ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgAffinityGroupsGet() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgAffinityGroupsGetRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgAffinityGroupsGetSchemaMake(),
|
||||
}
|
||||
}
|
||||
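This data source needs no dedicated flatten helper: the SDK accepts a plain integer slice for a computed TypeList of TypeInt, which is what lets the read function call d.Set("ids", computes) directly. A minimal test-style sketch of that behaviour, assuming the standard helper/schema test utilities; the test name is illustrative.

```go
package rg

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Sketch of a unit test: a plain []int can be assigned to a computed
// TypeList of TypeInt without converting to []interface{} first.
func TestSetPrimitiveIDList(t *testing.T) {
	s := map[string]*schema.Schema{
		"ids": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Schema{Type: schema.TypeInt},
		},
	}
	d := schema.TestResourceDataRaw(t, s, map[string]interface{}{})
	if err := d.Set("ids", []int{10, 11, 12}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := len(d.Get("ids").([]interface{})); got != 3 {
		t.Fatalf("expected 3 ids, got %d", got)
	}
}
```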
@@ -0,0 +1,67 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
list, err := utilityRgAffinityGroupsListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("affinity_groups", flattenRgListGroups(list))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
"affinity_groups": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"label": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgAffinityGroupsList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgAffinityGroupsListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgAffinityGroupsListSchemaMake(),
|
||||
}
|
||||
}
|
||||
77 internal/service/cloudapi/rg/data_source_rg_audits.go Normal file
@@ -0,0 +1,77 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
rgAudits, err := utilityRgAuditsCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("items", flattenRgAudits(rgAudits))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgAuditsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"call": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"responsetime": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"statuscode": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"timestamp": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"user": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgAudits() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgAuditsRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgAuditsSchemaMake(),
|
||||
}
|
||||
}
|
||||
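Each audit item exposes `timestamp` and `responsetime` as floats. A small standalone sketch of how a consumer might interpret them, assuming the timestamp is Unix epoch seconds and the response time is in seconds; both units are assumptions here, not confirmed by this changeset, and the sample values are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values in the shape of one entry of the audits "items" list.
	item := map[string]interface{}{
		"call":         "cloudapi/rg/get",
		"responsetime": 0.042,        // assumed to be seconds
		"statuscode":   200,
		"timestamp":    1660000000.0, // assumed to be Unix epoch seconds
		"user":         "admin",
	}

	ts := time.Unix(int64(item["timestamp"].(float64)), 0).UTC()
	fmt.Printf("%s %v %s by %v (%.0f ms)\n",
		ts.Format(time.RFC3339),
		item["statuscode"],
		item["call"],
		item["user"],
		item["responsetime"].(float64)*1000)
}
```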
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -40,73 +41,6 @@ import (
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func flattenRgList(rgl ResgroupListResp) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rg := range rgl {
|
||||
temp := map[string]interface{}{
|
||||
"account_id": rg.AccountID,
|
||||
"account_name": rg.AccountName,
|
||||
"acl": flattenRgAcl(rg.ACLs),
|
||||
"created_by": rg.CreatedBy,
|
||||
"created_time": rg.CreatedTime,
|
||||
"def_net_id": rg.DefaultNetID,
|
||||
"def_net_type": rg.DefaultNetType,
|
||||
"deleted_by": rg.DeletedBy,
|
||||
"deleted_time": rg.DeletedTime,
|
||||
"desc": rg.Decsription,
|
||||
"gid": rg.GridID,
|
||||
"guid": rg.GUID,
|
||||
"rg_id": rg.ID,
|
||||
"lock_status": rg.LockStatus,
|
||||
"milestones": rg.Milestones,
|
||||
"name": rg.Name,
|
||||
"register_computes": rg.RegisterComputes,
|
||||
"resource_limits": flattenRgResourceLimits(rg.ResourceLimits),
|
||||
"secret": rg.Secret,
|
||||
"status": rg.Status,
|
||||
"updated_by": rg.UpdatedBy,
|
||||
"updated_time": rg.UpdatedTime,
|
||||
"vins": rg.Vins,
|
||||
"vms": rg.Computes,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
|
||||
}
|
||||
|
||||
func flattenRgAcl(rgAcls []AccountAclRecord) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rgAcl := range rgAcls {
|
||||
temp := map[string]interface{}{
|
||||
"explicit": rgAcl.IsExplicit,
|
||||
"guid": rgAcl.Guid,
|
||||
"right": rgAcl.Rights,
|
||||
"status": rgAcl.Status,
|
||||
"type": rgAcl.Type,
|
||||
"user_group_id": rgAcl.UgroupID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgResourceLimits(rl ResourceLimits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"cu_c": rl.CUC,
|
||||
"cu_d": rl.CUD,
|
||||
"cu_i": rl.CUI,
|
||||
"cu_m": rl.CUM,
|
||||
"cu_np": rl.CUNP,
|
||||
"gpu_units": rl.GpuUnits,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
|
||||
}
|
||||
|
||||
func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
rgList, err := utilityRgListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
@@ -138,11 +72,19 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Optional: true,
|
||||
Description: "Page size",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -155,32 +97,7 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"explicit": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"right": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"user_group_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"created_by": {
|
||||
@@ -211,6 +128,10 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"dirty": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@@ -243,32 +164,7 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cu_c": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_d": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_i": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_m": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"cu_np": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu_units": {
|
||||
Type: schema.TypeFloat,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
Schema: resourceLimitsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"secret": {
|
||||
@@ -301,6 +197,20 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"resource_types": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"uniq_pools": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
193 internal/service/cloudapi/rg/data_source_rg_list_computes.go Normal file
@@ -0,0 +1,193 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgListComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
listComputes, err := utilityRgListComputesCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenRgListComputes(listComputes))
|
||||
return nil
|
||||
}
|
||||
|
||||
func rulesSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"key": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"mode": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"policy": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"topology": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"value": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
|
||||
}
|
||||
|
||||
func dataSourceRgListComputesSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
"reason": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "reason for action",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_label": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"affinity_rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: rulesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"affinity_weight": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"antiaffinity_rules": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: rulesSchemaMake(),
|
||||
},
|
||||
},
|
||||
"cpus": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"ram": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"registered": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"total_disks_size": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"user_managed": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"vins_connected": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgListComputes() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgListComputesRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgListComputesSchemaMake(),
|
||||
}
|
||||
}
|
||||
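The rulesSchemaMake() helper above is plugged into both affinity_rules and antiaffinity_rules through Elem, so one schema builder backs several nested list attributes. A minimal sketch of that composition pattern using the real SDK types; the attribute names and the trimmed-down rule schema are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// One shared builder for the element schema of several nested lists,
// mirroring how rulesSchemaMake() is reused above.
func ruleSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"key":   {Type: schema.TypeString, Computed: true},
		"value": {Type: schema.TypeString, Computed: true},
	}
}

func main() {
	attrs := map[string]*schema.Schema{
		"affinity_rules": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Resource{Schema: ruleSchema()},
		},
		"antiaffinity_rules": {
			Type:     schema.TypeList,
			Computed: true,
			Elem:     &schema.Resource{Schema: ruleSchema()},
		},
	}
	for name, s := range attrs {
		fmt.Println(name, s.Type)
	}
}
```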
196 internal/service/cloudapi/rg/data_source_rg_list_deleted.go Normal file
@@ -0,0 +1,196 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
rgList, err := utilityRgListDeletedCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenRgList(rgList))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Page number",
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Page size",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"def_net_type": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"dirty": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"lock_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"register_computes": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"resource_limits": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: resourceLimitsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"secret": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vins": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"vms": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
},
|
||||
"resource_types": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"uniq_pools": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgListDeleted() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgListDeletedRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgListDeletedSchemaMake(),
|
||||
}
|
||||
}
|
||||
357 internal/service/cloudapi/rg/data_source_rg_list_lb.go Normal file
@@ -0,0 +1,357 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgListLbRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
listLb, err := utilityRgListLbCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("items", flattenRgListLb(listLb))
|
||||
return nil
|
||||
}
|
||||
|
||||
func serversSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"address": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"check": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"server_settings": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: serverSettingsSchemaMake(),
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func serverSettingsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"inter": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"down_inter": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"rise": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"fall": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"slow_start": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"max_conn": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"max_queue": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"weight": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func backendsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"algorithm": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"server_default_settings": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: serverSettingsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"servers": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: serversSchemaMake(),
|
||||
},
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func bindingsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"address": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func frontendsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"backend": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"bindings": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: bindingsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func nodeSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"backend_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"compute_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"frontend_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"mgmt_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"network_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func dataSourceRgListLbSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"ha_mode": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
},
|
||||
"acl": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: aclSchemaMake(),
|
||||
},
|
||||
},
|
||||
"backends": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: backendsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"dp_api_user": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"extnet_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"frontends": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: frontendsSchemaMake(),
|
||||
},
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"image_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"milestones": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"primary_node": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: nodeSchemaMake(),
|
||||
},
|
||||
},
|
||||
"rg_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"secondary_node": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: nodeSchemaMake(),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"tech_status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgListLb() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgListLbRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgListLbSchemaMake(),
|
||||
}
|
||||
}
|
||||
89 internal/service/cloudapi/rg/data_source_rg_list_pfw.go Normal file
@@ -0,0 +1,89 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgListPfwRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
listPfw, err := utilityRgListPfwCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("items", flattenRgListPfw(listPfw))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgListPfwSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"public_port_end": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"public_port_start": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vm_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vm_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"vm_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"vm_port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vins_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"vins_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgListPfw() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgListPfwRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgListPfwSchemaMake(),
|
||||
}
|
||||
}
|
||||
126 internal/service/cloudapi/rg/data_source_rg_list_vins.go Normal file
@@ -0,0 +1,126 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgListVinsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
listVins, err := utilityRgListVinsCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
d.Set("items", flattenRgListVins(listVins))
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgListVinsSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of the RG",
|
||||
},
|
||||
"reason": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Reason for action",
|
||||
},
|
||||
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"account_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"computes": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"created_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"deleted_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"external_ip": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"network": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"pri_vnf_dev_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"rg_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_by": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgListVins() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgListVinsRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgListVinsSchemaMake(),
|
||||
}
|
||||
}
|
||||
99 internal/service/cloudapi/rg/data_source_rg_usage.go Normal file
@@ -0,0 +1,99 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceRgUsageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
usage, err := utilityDataRgUsageCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
|
||||
flattenRgUsageResource(d, *usage)
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"rg_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"reason": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"cpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"disk_size_max": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"extips": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"exttraffic": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"gpu": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"ram": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"seps": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"sep_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"map": {
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceRgUsage() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceRgUsageRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceRgUsageSchemaMake(),
|
||||
}
|
||||
}
|
||||
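The new data source constructors only become usable once they are wired into the provider's DataSourcesMap. A sketch of that registration is below; the decort_* names follow the provider's usual naming convention but are an assumption here, since the registration file itself is not part of this listing.

```go
package provider

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/rudecs/terraform-provider-decort/internal/service/cloudapi/rg"
)

// Sketch only: exposing a few of the new rg data sources through the
// provider. The data source names are assumed, not taken from this changeset.
func Provider() *schema.Provider {
	return &schema.Provider{
		DataSourcesMap: map[string]*schema.Resource{
			"decort_rg":         rg.DataSourceResgroup(),
			"decort_rg_audits":  rg.DataSourceRgAudits(),
			"decort_rg_list_lb": rg.DataSourceRgListLb(),
			"decort_rg_usage":   rg.DataSourceRgUsage(),
		},
	}
}
```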
579 internal/service/cloudapi/rg/flattens.go Normal file
@@ -0,0 +1,579 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://github.com/rudecs/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package rg
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func flattenAccountSeps(seps map[string]map[string]DiskUsage) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for sepKey, sepVal := range seps {
|
||||
for dataKey, dataVal := range sepVal {
|
||||
temp := map[string]interface{}{
|
||||
"sep_id": sepKey,
|
||||
"data_name": dataKey,
|
||||
"disk_size": dataVal.DiskSize,
|
||||
"disk_size_max": dataVal.DiskSizeMax,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenAccResource(r Resource) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"cpu": r.CPU,
|
||||
"disksize": r.DiskSize,
|
||||
"extips": r.ExtIPs,
|
||||
"exttraffic": r.ExtTraffic,
|
||||
"gpu": r.GPU,
|
||||
"ram": r.RAM,
|
||||
"seps": flattenAccountSeps(r.SEPs),
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgResources(r Resources) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"current": flattenAccResource(r.Current),
|
||||
"reserved": flattenAccResource(r.Reserved),
|
||||
}
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenResgroup(d *schema.ResourceData, details RecordResourceGroup) error {
|
||||
// NOTE: this function modifies ResourceData argument - as such it should never be called
|
||||
// from resourceRsgroupExists(...) method
|
||||
// log.Debugf("%s", rg_facts)
|
||||
//log.Debugf("flattenResgroup: ready to decode response body from API")
|
||||
//details := ResgroupGetResp{}
|
||||
//err := json.Unmarshal([]byte(rg_facts), &details)
|
||||
//if err != nil {
|
||||
//return err
|
||||
//}
|
||||
|
||||
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
|
||||
details.Name, details.ID, details.AccountID)
|
||||
|
||||
d.SetId(fmt.Sprintf("%d", details.ID))
|
||||
|
||||
d.Set("account_id", details.AccountID)
|
||||
d.Set("gid", details.GID)
|
||||
d.Set("def_net_type", details.DefNetType)
|
||||
d.Set("name", details.Name)
|
||||
|
||||
d.Set("resources", flattenRgResource(details.Resources))
|
||||
d.Set("account_name", details.AccountName)
|
||||
d.Set("acl", flattenRgAcl(details.ACL))
|
||||
d.Set("vms", details.Computes)
|
||||
d.Set("created_by", details.CreatedBy)
|
||||
d.Set("created_time", details.CreatedTime)
|
||||
d.Set("def_net_id", details.DefNetID)
|
||||
d.Set("deleted_by", details.DeletedBy)
|
||||
d.Set("deleted_time", details.DeletedTime)
|
||||
d.Set("description", details.Description)
|
||||
d.Set("dirty", details.Dirty)
|
||||
d.Set("guid", details.GUID)
|
||||
d.Set("rg_id", details.ID)
|
||||
d.Set("lock_status", details.LockStatus)
|
||||
d.Set("milestones", details.Milestones)
|
||||
d.Set("register_computes", details.RegisterComputes)
|
||||
d.Set("res_types", details.ResTypes)
|
||||
d.Set("secret", details.Secret)
|
||||
d.Set("status", details.Status)
|
||||
d.Set("updated_by", details.UpdatedBy)
|
||||
d.Set("updated_time", details.UpdatedTime)
|
||||
d.Set("uniq_pools", details.UniqPools)
|
||||
d.Set("vins", details.VINS)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenRgSeps(seps map[string]map[string]DiskUsage) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for sepKey, sepVal := range seps {
|
||||
SepMap := map[string]interface{}{}
|
||||
for dataKey, dataVal := range sepVal {
|
||||
val, _ := json.Marshal(dataVal)
|
||||
SepMap[dataKey] = string(val)
|
||||
}
|
||||
temp := map[string]interface{}{
|
||||
"sep_id": sepKey,
|
||||
"map": SepMap,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
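flattenRgSeps keeps the per-SEP breakdown but JSON-encodes each inner record, so in state the "map" attribute holds strings rather than nested objects. A self-contained sketch of the same transformation, using an illustrative stand-in for the DiskUsage model and made-up sample values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-in for the provider's DiskUsage model;
// the JSON field names here are assumptions.
type diskUsage struct {
	DiskSize    float64 `json:"disksize"`
	DiskSizeMax float64 `json:"disksizemax"`
}

// Same shape of transformation as flattenRgSeps: nested map -> list of
// {sep_id, map}, where every inner record is serialized to a JSON string.
func flattenSeps(seps map[string]map[string]diskUsage) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(seps))
	for sepID, data := range seps {
		m := map[string]interface{}{}
		for name, usage := range data {
			b, _ := json.Marshal(usage)
			m[name] = string(b)
		}
		res = append(res, map[string]interface{}{"sep_id": sepID, "map": m})
	}
	return res
}

func main() {
	in := map[string]map[string]diskUsage{
		"1": {"vmstor": {DiskSize: 10, DiskSizeMax: 100}},
	}
	fmt.Println(flattenSeps(in))
	// [map[map:map[vmstor:{"disksize":10,"disksizemax":100}] sep_id:1]]
}
```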
|
||||
func flattenResource(resource Resource) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
|
||||
temp := map[string]interface{}{
|
||||
"cpu": resource.CPU,
|
||||
"disk_size": resource.DiskSize,
|
||||
"disk_size_max": resource.DiskSizeMax,
|
||||
"extips": resource.ExtIPs,
|
||||
"exttraffic": resource.ExtTraffic,
|
||||
"gpu": resource.GPU,
|
||||
"ram": resource.RAM,
|
||||
"seps": flattenRgSeps(resource.SEPs),
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgResource(itemResource Resources) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"current": flattenResource(itemResource.Current),
|
||||
"reserved": flattenResource(itemResource.Reserved),
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRg(d *schema.ResourceData, itemRg RecordResourceGroup) {
|
||||
d.Set("resources", flattenRgResource(itemRg.Resources))
|
||||
d.Set("account_id", itemRg.AccountID)
|
||||
d.Set("account_name", itemRg.AccountName)
|
||||
d.Set("acl", flattenRgAcl(itemRg.ACL))
|
||||
d.Set("computes", itemRg.Computes)
|
||||
d.Set("created_by", itemRg.CreatedBy)
|
||||
d.Set("created_time", itemRg.CreatedTime)
|
||||
d.Set("def_net_id", itemRg.DefNetID)
|
||||
d.Set("def_net_type", itemRg.DefNetType)
|
||||
d.Set("deleted_by", itemRg.DeletedBy)
|
||||
d.Set("deleted_time", itemRg.DeletedTime)
|
||||
d.Set("desc", itemRg.Description)
|
||||
d.Set("dirty", itemRg.Dirty)
|
||||
d.Set("gid", itemRg.GID)
|
||||
d.Set("guid", itemRg.GUID)
|
||||
d.Set("rg_id", itemRg.ID)
|
||||
d.Set("lock_status", itemRg.LockStatus)
|
||||
d.Set("milestones", itemRg.Milestones)
|
||||
d.Set("name", itemRg.Name)
|
||||
d.Set("register_computes", itemRg.RegisterComputes)
|
||||
d.Set("res_types", itemRg.ResTypes)
|
||||
d.Set("resource_limits", flattenRgResourceLimits(itemRg.ResourceLimits))
|
||||
d.Set("secret", itemRg.Secret)
|
||||
d.Set("status", itemRg.Status)
|
||||
d.Set("updated_by", itemRg.UpdatedBy)
|
||||
d.Set("updated_time", itemRg.UpdatedTime)
|
||||
d.Set("uniq_pools", itemRg.UniqPools)
|
||||
d.Set("vins", itemRg.VINS)
|
||||
}
|
||||
|
||||
func flattenRgAudits(rgAudits ListAudits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rgAudit := range rgAudits {
|
||||
temp := map[string]interface{}{
|
||||
"call": rgAudit.Call,
|
||||
"responsetime": rgAudit.ResponseTime,
|
||||
"statuscode": rgAudit.StatusCode,
|
||||
"timestamp": rgAudit.Timestamp,
|
||||
"user": rgAudit.User,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgList(rgl ListResourceGroups) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rg := range rgl {
|
||||
temp := map[string]interface{}{
|
||||
"account_acl": flattenRgAcl(rg.ACL),
|
||||
"account_id": rg.AccountID,
|
||||
"account_name": rg.AccountName,
|
||||
"acl": flattenRgAcl(rg.ACL),
|
||||
"created_by": rg.CreatedBy,
|
||||
"created_time": rg.CreatedTime,
|
||||
"def_net_id": rg.DefNetID,
|
||||
"def_net_type": rg.DefNetType,
|
||||
"deleted_by": rg.DeletedBy,
|
||||
"deleted_time": rg.DeletedTime,
|
||||
"desc": rg.Description,
|
||||
"dirty": rg.Dirty,
|
||||
"gid": rg.GID,
|
||||
"guid": rg.GUID,
|
||||
"rg_id": rg.ID,
|
||||
"lock_status": rg.LockStatus,
|
||||
"milestones": rg.Milestones,
|
||||
"name": rg.Name,
|
||||
"register_computes": rg.RegisterComputes,
|
||||
"resource_limits": flattenRgResourceLimits(rg.ResourceLimits),
|
||||
"secret": rg.Secret,
|
||||
"status": rg.Status,
|
||||
"updated_by": rg.UpdatedBy,
|
||||
"updated_time": rg.UpdatedTime,
|
||||
"vins": rg.VINS,
|
||||
"vms": rg.Computes,
|
||||
"resource_types": rg.ResTypes,
|
||||
"uniq_pools": rg.UniqPools,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
|
||||
}
|
||||
|
||||
func flattenRgAcl(rgAcls ListACL) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rgAcl := range rgAcls {
|
||||
temp := map[string]interface{}{
|
||||
"explicit": rgAcl.Explicit,
|
||||
"guid": rgAcl.GUID,
|
||||
"right": rgAcl.Right,
|
||||
"status": rgAcl.Status,
|
||||
"type": rgAcl.Type,
|
||||
"user_group_id": rgAcl.UserGroupID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgResourceLimits(rl ResourceLimits) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"cu_c": rl.CUC,
|
||||
"cu_d": rl.CUD,
|
||||
"cu_i": rl.CUI,
|
||||
"cu_m": rl.CUM,
|
||||
"cu_np": rl.CUNP,
|
||||
"gpu_units": rl.GpuUnits,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
|
||||
}
|
||||
|
||||
func flattenRules(list ListRules) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, rule := range list {
|
||||
temp := map[string]interface{}{
|
||||
"guid": rule.GUID,
|
||||
"key": rule.Key,
|
||||
"mode": rule.Mode,
|
||||
"policy": rule.Policy,
|
||||
"topology": rule.Topology,
|
||||
"value": rule.Value,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgListComputes(lc ListComputes) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, compute := range lc {
|
||||
temp := map[string]interface{}{
|
||||
"account_id": compute.AccountID,
|
||||
"account_name": compute.AccountName,
|
||||
"affinity_label": compute.AffinityLabel,
|
||||
"affinity_rules": flattenRules(compute.AffinityRules),
|
||||
"affinity_weight": compute.AffinityWeight,
|
||||
"antiaffinity_rules": flattenRules(compute.AntiAffinityRules),
|
||||
"cpus": compute.CPUs,
|
||||
"created_by": compute.CreatedBy,
|
||||
"created_time": compute.CreatedTime,
|
||||
"deleted_by": compute.DeletedBy,
|
||||
"deleted_time": compute.DeletedTime,
|
||||
"id": compute.ID,
|
||||
"name": compute.Name,
|
||||
"ram": compute.RAM,
|
||||
"registered": compute.Registered,
|
||||
"rg_name": compute.RGName,
|
||||
"status": compute.Status,
|
||||
"tech_status": compute.TechStatus,
|
||||
"total_disks_size": compute.TotalDisksSize,
|
||||
"updated_by": compute.DeletedBy,
|
||||
"updated_time": compute.DeletedTime,
|
||||
"user_managed": compute.UserManaged,
|
||||
"vins_connected": compute.VINSConnected,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenServerSettings(settings ServerSettings) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"inter": settings.Inter,
|
||||
"guid": settings.GUID,
|
||||
"down_inter": settings.DownInter,
|
||||
"rise": settings.Rise,
|
||||
"fall": settings.Fall,
|
||||
"slow_start": settings.SlowStart,
|
||||
"max_conn": settings.MaxConn,
|
||||
"max_queue": settings.MaxQueue,
|
||||
"weight": settings.Weight,
|
||||
}
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenListServers(list ListServers) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, serv := range list {
|
||||
temp := map[string]interface{}{
|
||||
"address": serv.Address,
|
||||
"check": serv.Check,
|
||||
"guid": serv.GUID,
|
||||
"name": serv.Name,
|
||||
"port": serv.Port,
|
||||
"server_settings": flattenServerSettings(serv.ServerSettings),
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenBackends(b ListBackends) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, item := range b {
|
||||
temp := map[string]interface{}{
|
||||
"algorithm": item.Algorithm,
|
||||
"guid": item.GUID,
|
||||
"name": item.Name,
|
||||
"server_default_settings": flattenServerSettings(item.ServerDefaultSettings),
|
||||
"servers": flattenListServers(item.Servers),
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenBindings(list ListBindings) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, bind := range list {
|
||||
temp := map[string]interface{}{
|
||||
"address": bind.Address,
|
||||
"guid": bind.GUID,
|
||||
"name": bind.Name,
|
||||
"port": bind.Port,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenFrontends(list ListFrontends) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, front := range list {
|
||||
temp := map[string]interface{}{
|
||||
"backend": front.Backend,
|
||||
"bindings": flattenBindings(front.Bindings),
|
||||
"guid": front.GUID,
|
||||
"name": front.Name,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenNode(node RecordNode) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"backend_ip": node.BackendIP,
|
||||
"compute_id": node.ComputeID,
|
||||
"frontend_ip": node.FrontendIP,
|
||||
"guid": node.GUID,
|
||||
"mgmt_ip": node.MGMTIP,
|
||||
"network_id": node.NetworkID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgListLb(listLb ListLB) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, lb := range listLb {
|
||||
temp := map[string]interface{}{
|
||||
"ha_mode": lb.HAMode,
|
||||
"acl": lb.ACL,
|
||||
"backends": flattenBackends(lb.Backends),
|
||||
"created_by": lb.CreatedBy,
|
||||
"created_time": lb.CreatedTime,
|
||||
"deleted_by": lb.DeletedBy,
|
||||
"deleted_time": lb.DeletedTime,
|
||||
"desc": lb.Description,
|
||||
"dp_api_user": lb.DPAPIUser,
|
||||
"extnet_id": lb.ExtNetID,
|
||||
"frontends": flattenFrontends(lb.Frontends),
|
||||
"gid": lb.GID,
|
||||
"guid": lb.GUID,
|
||||
"id": lb.ID,
|
||||
"image_id": lb.ImageID,
|
||||
"milestones": lb.Milestones,
|
||||
"name": lb.Name,
|
||||
"primary_node": flattenNode(lb.PrimaryNode),
|
||||
"rg_name": lb.RGName,
|
||||
"secondary_node": flattenNode(lb.SecondaryNode),
|
||||
"status": lb.Status,
|
||||
"tech_status": lb.TechStatus,
|
||||
"updated_by": lb.UpdatedBy,
|
||||
"updated_time": lb.UpdatedTime,
|
||||
"vins_id": lb.VINSID,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgListPfw(listPfw ListPFW) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, pfw := range listPfw {
|
||||
temp := map[string]interface{}{
|
||||
"public_port_end": pfw.PublicPortEnd,
|
||||
"public_port_start": pfw.PublicPortStart,
|
||||
"vm_id": pfw.VMID,
|
||||
"vm_ip": pfw.VMIP,
|
||||
"vm_name": pfw.VMName,
|
||||
"vm_port": pfw.VMPort,
|
||||
"vins_id": pfw.VINSID,
|
||||
"vins_name": pfw.VINSName,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgListVins(lv ListVINS) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for _, vins := range lv {
|
||||
temp := map[string]interface{}{
|
||||
"account_id": vins.AccountID,
|
||||
"account_name": vins.AccountName,
|
||||
"computes": vins.Computes,
|
||||
"created_by": vins.CreatedBy,
|
||||
"created_time": vins.CreatedTime,
|
||||
"deleted_by": vins.DeletedBy,
|
||||
"deleted_time": vins.DeletedTime,
|
||||
"external_ip": vins.ExternalIP,
|
||||
"id": vins.ID,
|
||||
"name": vins.Name,
|
||||
"network": vins.Network,
|
||||
"pri_vnf_dev_id": vins.PriVNFDevID,
|
||||
"rg_name": vins.RGName,
|
||||
"status": vins.Status,
|
||||
"updated_by": vins.UpdatedBy,
|
||||
"updated_time": vins.UpdatedTime,
|
||||
}
|
||||
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgAffinityGroupComputes(list ListAffinityGroupCompute) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
|
||||
for _, item := range list {
|
||||
temp := map[string]interface{}{
|
||||
"compute_id": item.ComputeID,
|
||||
"other_node": item.OtherNode,
|
||||
"other_node_indirect": item.OtherNodeIndirect,
|
||||
"other_node_indirect_soft": item.OtherNodeIndirectSoft,
|
||||
"other_node_soft": item.OtherNodeSoft,
|
||||
"same_node": item.SameNode,
|
||||
"same_node_soft": item.SameNodeSoft,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgAffinityGroupsGet(list []uint64) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
temp := map[string]interface{}{
|
||||
"items": list,
|
||||
}
|
||||
res = append(res, temp)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgListGroups(list map[string][]uint64) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0)
|
||||
for groupKey, groupVal := range list {
|
||||
temp := map[string]interface{}{
|
||||
"label": groupKey,
|
||||
"ids": groupVal,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func flattenRgUsageResource(d *schema.ResourceData, usage Resource) {
|
||||
d.Set("cpu", usage.CPU)
|
||||
d.Set("disk_size", usage.DiskSize)
|
||||
d.Set("disk_size_max", usage.DiskSizeMax)
|
||||
d.Set("extips", usage.ExtIPs)
|
||||
d.Set("exttraffic", usage.ExtTraffic)
|
||||
d.Set("gpu", usage.GPU)
|
||||
d.Set("ram", usage.RAM)
|
||||
d.Set("seps", flattenRgSeps(usage.SEPs))
|
||||
}
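The flatten helpers above convert API response structures into the generic []map[string]interface{} shape that the Terraform SDK expects for computed list attributes. A minimal sketch of how a helper such as flattenRgListLb might be consumed from a data source read function is shown below; the function name dataSourceRgListLbReadExample, the "items" attribute name, and the rg_id-based ID are illustrative assumptions, not the provider's actual implementation.

```go
package rg

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Illustrative sketch only: shows how a flattener feeds d.Set for a list attribute.
func dataSourceRgListLbReadExample(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	// utilityRgListLbCheckPresence (defined elsewhere in this package) decodes rg/listLb into ListLB.
	listLb, err := utilityRgListLbCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	// The flattener converts API structs into []map[string]interface{}, the shape the SDK expects.
	if err := d.Set("items", flattenRgListLb(listLb)); err != nil {
		return diag.FromErr(err)
	}

	// "items" and the rg_id-based ID are assumptions for this sketch.
	d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
	return nil
}
```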
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -40,91 +41,6 @@ type ResourceLimits struct {
|
||||
GpuUnits float64 `json:"gpu_units"`
|
||||
}
|
||||
|
||||
type ResgroupRecord struct {
|
||||
ACLs []AccountAclRecord `json:"acl"`
|
||||
AccountID int `json:"accountId"`
|
||||
AccountName string `json:"accountName"`
|
||||
CreatedBy string `json:"createdBy"`
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
DefaultNetID int `json:"def_net_id"`
|
||||
DefaultNetType string `json:"def_net_type"`
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
DeletedTime int `json:"deletedTime"`
|
||||
Description string `json:"desc"`
|
||||
GridID int `json:"gid"`
|
||||
GUID int `json:"guid"`
|
||||
ID uint `json:"id"`
|
||||
LockStatus string `json:"lockStatus"`
|
||||
Milestones int `json:"milestones"`
|
||||
Name string `json:"name"`
|
||||
RegisterComputes bool `json:"registerComputes"`
|
||||
ResourceLimits ResourceLimits `json:"resourceLimits"`
|
||||
Secret string `json:"secret"`
|
||||
Status string `json:"status"`
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
Vins []int `json:"vins"`
|
||||
Computes []int `json:"vms"`
|
||||
}
|
||||
|
||||
type ResgroupListResp []ResgroupRecord
|
||||
|
||||
type ResgroupUpdateParam struct {
|
||||
RgId int `json:"rgId"`
|
||||
Name string `json:"name"`
|
||||
Desc string `json:"decs"`
|
||||
Ram int `json:"maxMemoryCapacity"`
|
||||
Disk int `json:"maxVDiskCapacity"`
|
||||
Cpu int `json:"maxCPUCapacity"`
|
||||
NetTraffic int `json:"maxNetworkPeerTransfer"`
|
||||
Reason string `json:"reason"`
|
||||
}
|
||||
|
||||
type AccountAclRecord struct {
|
||||
IsExplicit bool `json:"explicit"`
|
||||
Guid string `json:"guid"`
|
||||
Rights string `json:"right"`
|
||||
Status string `json:"status"`
|
||||
Type string `json:"type"`
|
||||
UgroupID string `json:"userGroupId"`
|
||||
CanBeDeleted bool `json:"canBeDeleted"`
|
||||
}
|
||||
|
||||
type ResgroupGetResp struct {
|
||||
ACLs []UserAclRecord `json:"ACLs"`
|
||||
Usage UsageRecord `json:"Resources"`
|
||||
AccountID int `json:"accountId"`
|
||||
AccountName string `json:"accountName"`
|
||||
GridID int `json:"gid"`
|
||||
CreatedBy string `json:"createdBy"`
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
DefaultNetID int `json:"def_net_id"`
|
||||
DefaultNetType string `json:"def_net_type"`
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
Desc string `json:"desc"`
|
||||
ID uint `json:"id"`
|
||||
LockStatus string `json:"lockStatus"`
|
||||
Name string `json:"name"`
|
||||
Quota QuotaRecord `json:"resourceLimits"`
|
||||
Status string `json:"status"`
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
Vins []int `json:"vins"`
|
||||
Computes []int `json:"vms"`
|
||||
|
||||
Ignored map[string]interface{} `json:"-"`
|
||||
}
|
||||
|
||||
type UserAclRecord struct {
|
||||
IsExplicit bool `json:"explicit"`
|
||||
Rights string `json:"right"`
|
||||
Status string `json:"status"`
|
||||
Type string `json:"type"`
|
||||
UgroupID string `json:"userGroupId"`
|
||||
// CanBeDeleted bool `json:"canBeDeleted"`
|
||||
}
|
||||
|
||||
type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get
|
||||
Cpu int `json:"CU_C"` // CPU count in pcs
|
||||
Ram float64 `json:"CU_M"` // RAM volume in MB, it is STILL reported as FLOAT
|
||||
@@ -134,16 +50,700 @@ type QuotaRecord struct { // this is how quota is reported by /api/.../rg/get
|
||||
GpuUnits int `json:"gpu_units"` // GPU count
|
||||
}
|
||||
|
||||
type ResourceRecord struct { // this is how actual usage is reported by /api/.../rg/get
|
||||
Cpu int `json:"cpu"`
|
||||
Disk int `json:"disksize"`
|
||||
ExtIPs int `json:"extips"`
|
||||
ExtTraffic int `json:"exttraffic"`
|
||||
Gpu int `json:"gpu"`
Ram int `json:"ram"`
}

// Main information about audit
type ItemAudit struct {
|
||||
// Call
|
||||
Call string `json:"call"`
|
||||
|
||||
// Response time
|
||||
ResponseTime float64 `json:"responsetime"`
|
||||
|
||||
// Status code
|
||||
StatusCode uint64 `json:"statuscode"`
|
||||
|
||||
// Timestamp
|
||||
Timestamp float64 `json:"timestamp"`
|
||||
|
||||
// User
|
||||
User string `json:"user"`
|
||||
}
|
||||
|
||||
type UsageRecord struct {
|
||||
Current ResourceRecord `json:"Current"`
Reserved ResourceRecord `json:"Reserved"`
}

// List of audits
type ListAudits []ItemAudit
|
||||
|
||||
// Resources used
|
||||
type Resource struct {
|
||||
// Number of cores
|
||||
CPU int64 `json:"cpu"`
|
||||
|
||||
// Disk size
|
||||
DiskSize float64 `json:"disksize"`
|
||||
|
||||
// Max disk size
|
||||
DiskSizeMax uint64 `json:"disksizemax"`
|
||||
|
||||
// Number of External IPs
|
||||
ExtIPs int64 `json:"extips"`
|
||||
|
||||
// External traffic
|
||||
ExtTraffic int64 `json:"exttraffic"`
|
||||
|
||||
// Number of graphics cores
|
||||
GPU int64 `json:"gpu"`
|
||||
|
||||
// Amount of RAM
|
||||
RAM int64 `json:"ram"`
|
||||
|
||||
// SEPs
|
||||
SEPs map[string]map[string]DiskUsage `json:"seps"`
|
||||
}
|
||||
|
||||
// Disk usage
|
||||
type DiskUsage struct {
|
||||
// Disk size
|
||||
DiskSize float64 `json:"disksize"`
|
||||
|
||||
// Disk size max
|
||||
DiskSizeMax float64 `json:"disksizemax"`
|
||||
}
|
||||
|
||||
// Information about resources
|
||||
type Resources struct {
|
||||
// Current information about resources
|
||||
Current Resource `json:"Current"`
|
||||
|
||||
// Reserved information about resources
|
||||
Reserved Resource `json:"Reserved"`
|
||||
}
|
||||
|
||||
// Detailed information about resource group
|
||||
type RecordResourceGroup struct {
|
||||
// Resources
|
||||
Resources Resources `json:"Resources"`
|
||||
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Access Control List
|
||||
ACL ListACL `json:"acl"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// DefNetID
|
||||
DefNetID int64 `json:"def_net_id"`
|
||||
|
||||
// DefNetType
|
||||
DefNetType string `json:"def_net_type"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// Dirty
|
||||
Dirty bool `json:"dirty"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Lock status
|
||||
LockStatus string `json:"lockStatus"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// RegisterComputes
|
||||
RegisterComputes bool `json:"registerComputes"`
|
||||
|
||||
// Resource limits
|
||||
ResourceLimits ResourceLimits `json:"resourceLimits"`
|
||||
|
||||
// Secret
|
||||
Secret string `json:"secret"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// List of VINS IDs
|
||||
VINS []uint64 `json:"vins"`
|
||||
|
||||
// List of compute IDs
|
||||
Computes []uint64 `json:"vms"`
|
||||
|
||||
// List of resource types
|
||||
ResTypes []string `json:"resourceTypes"`
|
||||
|
||||
// UniqPools
|
||||
UniqPools []string `json:"uniqPools"`
|
||||
}
|
||||
|
||||
// Main information about Access Control List
|
||||
type ItemACL struct {
|
||||
// Explicit
|
||||
Explicit bool `json:"explicit"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Right
|
||||
Right string `json:"right"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Type
|
||||
Type string `json:"type"`
|
||||
|
||||
// User group ID
|
||||
UserGroupID string `json:"userGroupId"`
|
||||
}
|
||||
|
||||
// List ACL
|
||||
type ListACL []ItemACL
|
||||
|
||||
type ItemResourceGroup struct {
|
||||
// Account ACL
|
||||
AccountACL ItemACL `json:"accountAcl"`
|
||||
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Access Control List
|
||||
ACL ListACL `json:"acl"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// DefNetID
|
||||
DefNetID int64 `json:"def_net_id"`
|
||||
|
||||
// DefNetType
|
||||
DefNetType string `json:"def_net_type"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// Dirty
|
||||
Dirty bool `json:"dirty"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Lock status
|
||||
LockStatus string `json:"lockStatus"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// RegisterComputes
|
||||
RegisterComputes bool `json:"registerComputes"`
|
||||
|
||||
// Resource limits
|
||||
ResourceLimits ResourceLimits `json:"resourceLimits"`
|
||||
|
||||
// Secret
|
||||
Secret string `json:"secret"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// List of VINS IDs
|
||||
VINS []uint64 `json:"vins"`
|
||||
|
||||
// List of compute IDs
|
||||
Computes []uint64 `json:"vms"`
|
||||
|
||||
// List of resource types
|
||||
ResTypes []string `json:"resourceTypes"`
|
||||
|
||||
// UniqPools
|
||||
UniqPools []string `json:"uniqPools"`
|
||||
}
|
||||
|
||||
// List of resource groups
|
||||
type ListResourceGroups []ItemResourceGroup
|
||||
|
||||
// Main information about affinity rule
|
||||
type ItemRule struct {
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Key
|
||||
Key string `json:"key"`
|
||||
|
||||
// Mode
|
||||
Mode string `json:"mode"`
|
||||
|
||||
// Policy
|
||||
Policy string `json:"policy"`
|
||||
|
||||
// Topology
|
||||
Topology string `json:"topology"`
|
||||
|
||||
// Value
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// List rules
|
||||
type ListRules []ItemRule
|
||||
|
||||
// Main information about compute
|
||||
type ItemCompute struct {
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Affinity label
|
||||
AffinityLabel string `json:"affinityLabel"`
|
||||
|
||||
// List affinity rules
|
||||
AffinityRules ListRules `json:"affinityRules"`
|
||||
|
||||
// Affinity weight
|
||||
AffinityWeight uint64 `json:"affinityWeight"`
|
||||
|
||||
// Anti affinity rules
|
||||
AntiAffinityRules ListRules `json:"antiAffinityRules"`
|
||||
|
||||
// Number of CPUs
|
||||
CPUs uint64 `json:"cpus"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Amount of RAM
|
||||
RAM uint64 `json:"ram"`
|
||||
|
||||
// Registered
|
||||
Registered bool `json:"registered"`
|
||||
|
||||
// Resource group ID
|
||||
RGID uint64 `json:"rgId"`
|
||||
|
||||
// Resource group name
|
||||
RGName string `json:"rgName"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Tech status
|
||||
TechStatus string `json:"techStatus"`
|
||||
|
||||
// Total disks size
|
||||
TotalDisksSize uint64 `json:"totalDisksSize"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// User managed
|
||||
UserManaged bool `json:"userManaged"`
|
||||
|
||||
// VINS connected
|
||||
VINSConnected uint64 `json:"vinsConnected"`
|
||||
}
|
||||
|
||||
// List computes
|
||||
type ListComputes []ItemCompute
|
||||
|
||||
// Main information about port forward
|
||||
type ItemPFW struct {
|
||||
// Public port end
|
||||
PublicPortEnd uint64 `json:"Public Port End"`
|
||||
|
||||
// Public port start
|
||||
PublicPortStart uint64 `json:"Public Port Start"`
|
||||
|
||||
// Virtual machine ID
|
||||
VMID uint64 `json:"VM ID"`
|
||||
|
||||
// Virtual machine IP
|
||||
VMIP string `json:"VM IP"`
|
||||
|
||||
// Virtual machine name
|
||||
VMName string `json:"VM Name"`
|
||||
|
||||
// Virtual machine port
|
||||
VMPort uint64 `json:"VM Port"`
|
||||
|
||||
// VINS ID
|
||||
VINSID uint64 `json:"ViNS ID"`
|
||||
|
||||
// VINS name
|
||||
VINSName string `json:"ViNS Name"`
|
||||
}
|
||||
|
||||
// List PFWs
|
||||
type ListPFW []ItemPFW
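The PFW JSON keys contain spaces ("Public Port End", "ViNS ID", and so on), which encoding/json handles transparently through the struct tags above. A small self-contained sketch of decoding such a payload into ListPFW follows; the sample response body is invented for illustration, real data comes from the rg/listPFW API.

```go
package rg

import (
	"encoding/json"
	"fmt"
)

// Decode a PFW list response whose JSON keys contain spaces.
// The payload below is invented for illustration only.
func decodeListPFWExample() {
	raw := `[{"Public Port Start": 30000, "Public Port End": 30010,
	          "VM ID": 1234, "VM IP": "192.168.1.5", "VM Name": "web-1",
	          "VM Port": 80, "ViNS ID": 77, "ViNS Name": "vins-demo"}]`

	var pfws ListPFW
	if err := json.Unmarshal([]byte(raw), &pfws); err != nil {
		panic(err)
	}
	fmt.Println(pfws[0].VMName, pfws[0].PublicPortStart) // web-1 30000
}
```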
|
||||
|
||||
// Main information about VINS
|
||||
type ItemVINS struct {
|
||||
// Account ID
|
||||
AccountID uint64 `json:"accountId"`
|
||||
|
||||
// Account name
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Computes
|
||||
Computes uint64 `json:"computes"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// External IP
|
||||
ExternalIP string `json:"externalIP"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Network
|
||||
Network string `json:"network"`
|
||||
|
||||
// PriVNFDev ID
|
||||
PriVNFDevID uint64 `json:"priVnfDevId"`
|
||||
|
||||
// Resource group ID
|
||||
RGID uint64 `json:"rgId"`
|
||||
|
||||
// Resource group name
|
||||
RGName string `json:"rgName"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
}
|
||||
|
||||
// List VINSes
|
||||
type ListVINS []ItemVINS
|
||||
|
||||
// Server settings
|
||||
type ServerSettings struct {
|
||||
// Inter
|
||||
Inter uint64 `json:"inter"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Down inter
|
||||
DownInter uint64 `json:"downinter"`
|
||||
|
||||
// Rise
|
||||
Rise uint64 `json:"rise"`
|
||||
|
||||
// Fall
|
||||
Fall uint64 `json:"fall"`
|
||||
|
||||
// Slow start
|
||||
SlowStart uint64 `json:"slowstart"`
|
||||
|
||||
// Max connections
|
||||
MaxConn uint64 `json:"maxconn"`
|
||||
|
||||
// Max queue
|
||||
MaxQueue uint64 `json:"maxqueue"`
|
||||
|
||||
// Weight
|
||||
Weight uint64 `json:"weight"`
|
||||
}
|
||||
|
||||
// Main information about server
|
||||
type ItemServer struct {
|
||||
// Address
|
||||
Address string `json:"address"`
|
||||
|
||||
// Check
|
||||
Check string `json:"check"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Port
|
||||
Port uint64 `json:"port"`
|
||||
|
||||
// Server settings
|
||||
ServerSettings ServerSettings `json:"serverSettings"`
|
||||
}
|
||||
|
||||
// List of servers
|
||||
type ListServers []ItemServer
|
||||
|
||||
// Main information about backend
|
||||
type ItemBackend struct {
|
||||
// Algorithm
|
||||
Algorithm string `json:"algorithm"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Server settings
|
||||
ServerDefaultSettings ServerSettings `json:"serverDefaultSettings"`
|
||||
|
||||
// List of servers
|
||||
Servers ListServers `json:"servers"`
|
||||
}
|
||||
|
||||
// List of backends
|
||||
type ListBackends []ItemBackend
|
||||
|
||||
// Main information about binding
|
||||
type ItemBinding struct {
|
||||
// Address
|
||||
Address string `json:"address"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Port
|
||||
Port uint64 `json:"port"`
|
||||
}
|
||||
|
||||
// List of bindings
|
||||
type ListBindings []ItemBinding
|
||||
|
||||
// Main information about frontend
|
||||
type ItemFrontend struct {
|
||||
// Backend
|
||||
Backend string `json:"backend"`
|
||||
|
||||
// List of bindings
|
||||
Bindings ListBindings `json:"bindings"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// List of frontends
|
||||
type ListFrontends []ItemFrontend
|
||||
|
||||
// Main information about node
|
||||
type RecordNode struct {
|
||||
// Backend IP
|
||||
BackendIP string `json:"backendIp"`
|
||||
|
||||
// Compute ID
|
||||
ComputeID uint64 `json:"computeId"`
|
||||
|
||||
// Frontend IP
|
||||
FrontendIP string `json:"frontendIp"`
|
||||
|
||||
// GUID
|
||||
GUID string `json:"guid"`
|
||||
|
||||
// MGMT IP
|
||||
MGMTIP string `json:"mgmtIp"`
|
||||
|
||||
// Network ID
|
||||
NetworkID uint64 `json:"networkId"`
|
||||
}
|
||||
|
||||
// Main information about load balancer
|
||||
type ItemLB struct {
|
||||
// HAMode
|
||||
HAMode bool `json:"HAmode"`
|
||||
|
||||
// List ACL
|
||||
ACL ListACL `json:"acl"`
|
||||
|
||||
// List backends
|
||||
Backends ListBackends `json:"backends"`
|
||||
|
||||
// Created by
|
||||
CreatedBy string `json:"createdBy"`
|
||||
|
||||
// Created time
|
||||
CreatedTime uint64 `json:"createdTime"`
|
||||
|
||||
// Deleted by
|
||||
DeletedBy string `json:"deletedBy"`
|
||||
|
||||
// Deleted time
|
||||
DeletedTime uint64 `json:"deletedTime"`
|
||||
|
||||
// Description
|
||||
Description string `json:"desc"`
|
||||
|
||||
// DPAPI user
|
||||
DPAPIUser string `json:"dpApiUser"`
|
||||
|
||||
// External network ID
|
||||
ExtNetID uint64 `json:"extnetId"`
|
||||
|
||||
// List of frontends
|
||||
Frontends ListFrontends `json:"frontends"`
|
||||
|
||||
// Grid ID
|
||||
GID uint64 `json:"gid"`
|
||||
|
||||
// GUID
|
||||
GUID uint64 `json:"guid"`
|
||||
|
||||
// ID
|
||||
ID uint64 `json:"id"`
|
||||
|
||||
// Image ID
|
||||
ImageID uint64 `json:"imageId"`
|
||||
|
||||
// Milestones
|
||||
Milestones uint64 `json:"milestones"`
|
||||
|
||||
// Name
|
||||
Name string `json:"name"`
|
||||
|
||||
// Primary node
|
||||
PrimaryNode RecordNode `json:"primaryNode"`
|
||||
|
||||
// Resource group ID
|
||||
RGID uint64 `json:"rgId"`
|
||||
|
||||
// Resource group name
|
||||
RGName string `json:"rgName"`
|
||||
|
||||
// Secondary node
|
||||
SecondaryNode RecordNode `json:"secondaryNode"`
|
||||
|
||||
// Status
|
||||
Status string `json:"status"`
|
||||
|
||||
// Tech status
|
||||
TechStatus string `json:"techStatus"`
|
||||
|
||||
// Updated by
|
||||
UpdatedBy string `json:"updatedBy"`
|
||||
|
||||
// Updated time
|
||||
UpdatedTime uint64 `json:"updatedTime"`
|
||||
|
||||
// VINS ID
|
||||
VINSID uint64 `json:"vinsId"`
|
||||
}
|
||||
|
||||
// List load balancers
|
||||
type ListLB []ItemLB
|
||||
|
||||
// Main information about affinity group
|
||||
type ItemAffinityGroupCompute struct {
|
||||
// Compute ID
|
||||
ComputeID uint64 `json:"computeId"`
|
||||
|
||||
// Other node
|
||||
OtherNode []uint64 `json:"otherNode"`
|
||||
|
||||
// Other node indirect
|
||||
OtherNodeIndirect []uint64 `json:"otherNodeIndirect"`
|
||||
|
||||
// Other node indirect soft
|
||||
OtherNodeIndirectSoft []uint64 `json:"otherNodeIndirectSoft"`
|
||||
|
||||
// Other node soft
|
||||
OtherNodeSoft []uint64 `json:"otherNodeSoft"`
|
||||
|
||||
// Same node
|
||||
SameNode []uint64 `json:"sameNode"`
|
||||
|
||||
// Same node soft
|
||||
SameNodeSoft []uint64 `json:"sameNodeSoft"`
|
||||
}
|
||||
|
||||
// List of affinity groups
|
||||
type ListAffinityGroupCompute []ItemAffinityGroupCompute
|
||||
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -44,32 +45,33 @@ func makeQuotaRecord(arg_list []interface{}) QuotaRecord {
|
||||
ExtIPs: -1,
|
||||
GpuUnits: -1,
|
||||
}
|
||||
	if len(arg_list) != 0 {
		subres_data := arg_list[0].(map[string]interface{})

		if subres_data["cpu"].(int) > 0 {
			quota.Cpu = subres_data["cpu"].(int)
		}

		if subres_data["disk"].(int) > 0 {
			quota.Disk = subres_data["disk"].(int) // Disk capacity in GB
		}

		if subres_data["ram"].(float64) > 0 {
			quota.Ram = subres_data["ram"].(float64) // RAM volume in MB, as float64!
		}

		if subres_data["ext_traffic"].(int) > 0 {
			quota.ExtTraffic = subres_data["ext_traffic"].(int)
		}

		if subres_data["ext_ips"].(int) > 0 {
			quota.ExtIPs = subres_data["ext_ips"].(int)
		}

		if subres_data["gpu_units"].(int) > 0 {
			quota.GpuUnits = subres_data["gpu_units"].(int)
		}
	}

	return quota
}
|
||||
|
||||
|
||||
111
internal/service/cloudapi/rg/resource_check_input_values.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
|
||||
accountList := []struct {
|
||||
ID int `json:"id"`
|
||||
}{}
|
||||
|
||||
accountListAPI := "/restmachine/cloudapi/account/list"
|
||||
|
||||
accountListRaw, err := c.DecortAPICall(ctx, "POST", accountListAPI, urlValues)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(accountListRaw), &accountList)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
haveAccount := false
|
||||
|
||||
myAccount := d.Get("account_id").(int)
|
||||
for _, account := range accountList {
|
||||
if account.ID == myAccount {
|
||||
haveAccount = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return haveAccount, nil
|
||||
}
|
||||
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
|
||||
locationList := []struct {
|
||||
GID int `json:"gid"`
|
||||
}{}
|
||||
|
||||
locationsListAPI := "/restmachine/cloudapi/locations/list"
|
||||
|
||||
locationListRaw, err := c.DecortAPICall(ctx, "POST", locationsListAPI, urlValues)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(locationListRaw), &locationList)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
haveGID := false
|
||||
|
||||
myGID := d.Get("gid").(int)
|
||||
for _, location := range locationList {
|
||||
if location.GID == myGID {
|
||||
haveGID = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return haveGID, nil
|
||||
}
|
||||
func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
urlValues.Add("accountId", strconv.Itoa(d.Get("account_id").(int)))
|
||||
|
||||
listExtNet := []struct {
|
||||
ID int `json:"id"`
|
||||
}{}
|
||||
|
||||
extNetListAPI := "/restmachine/cloudapi/extnet/list"
|
||||
|
||||
listExtNetRaw, err := c.DecortAPICall(ctx, "POST", extNetListAPI, urlValues)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(listExtNetRaw), &listExtNet)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
haveExtNet := false
|
||||
|
||||
myExtNetID := d.Get("ext_net_id").(int)
|
||||
for _, extNet := range listExtNet {
|
||||
if extNet.ID == myExtNetID {
|
||||
haveExtNet = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return haveExtNet, nil
|
||||
}
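The existence checks in this new file validate the user-supplied account_id, gid, and ext_net_id against the corresponding list APIs before the provider proceeds. A hedged sketch of how such checks might be wired into a create flow is shown below; resourceResgroupCreateExample and the error messages are assumptions for illustration, not the provider's actual code, and the existExtNetID check would be handled the same way.

```go
package rg

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Illustrative sketch only: validating input IDs with the exist* helpers before creating a resource group.
func resourceResgroupCreateExample(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	haveAccount, err := existAccountID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	if !haveAccount {
		return diag.FromErr(fmt.Errorf("account with id %d does not exist", d.Get("account_id").(int)))
	}

	haveGID, err := existGID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	if !haveGID {
		return diag.FromErr(fmt.Errorf("grid with id %d does not exist", d.Get("gid").(int)))
	}

	// The actual create logic (rg/create API call, d.SetId, reading back the state) would follow here.
	return nil
}
```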
|
||||
|
||||
File diff suppressed because it is too large
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -34,19 +35,17 @@ package rg
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
// On success this function returns a pointer to a RecordResourceGroup structure, populated
// from the response of the rg/get API
|
||||
func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
|
||||
func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*RecordResourceGroup, error) {
|
||||
// This function tries to locate resource group by one of the following algorithms depending
|
||||
// on the parameters passed:
|
||||
// - if resource group ID is specified -> by RG ID
|
||||
@@ -67,73 +66,47 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
// make it possible to use "read" & "check presence" functions with RG ID set so
|
||||
// that Import of RG resource is possible
|
||||
idSet := false
|
||||
theId, err := strconv.Atoi(d.Id())
|
||||
if err != nil || theId <= 0 {
|
||||
rgId, argSet := d.GetOk("rg_id")
|
||||
if argSet {
|
||||
theId = rgId.(int)
|
||||
idSet = true
|
||||
}
|
||||
if d.Id() != "" {
|
||||
urlValues.Add("rgId", d.Id())
|
||||
} else {
|
||||
idSet = true
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
}
|
||||
if reason, ok := d.GetOk("reason"); ok {
|
||||
urlValues.Add("reason", reason.(string))
|
||||
}
|
||||
|
||||
if idSet {
|
||||
// go straight for the RG by its ID
|
||||
log.Debugf("utilityResgroupCheckPresence: locating RG by its ID %d", theId)
|
||||
urlValues.Add("rgId", fmt.Sprintf("%d", theId))
|
||||
rgFacts, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return rgFacts, nil
|
||||
}
|
||||
|
||||
rgName, argSet := d.GetOk("name")
|
||||
if !argSet {
|
||||
// no RG ID and no RG name - we cannot locate resource group in this case
|
||||
return "", fmt.Errorf("Cannot check resource group presence if name is empty and no resource group ID specified")
|
||||
}
|
||||
|
||||
// Valid account ID is required to locate a resource group
|
||||
// obtain Account ID by account name - it should not be zero on success
|
||||
|
||||
urlValues.Add("includedeleted", "false")
|
||||
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupListAPI, urlValues)
|
||||
rgData := &RecordResourceGroup{}
|
||||
rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
// log.Debugf("%s", apiResp)
|
||||
log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %s", ResgroupListAPI)
|
||||
model := ResgroupListResp{}
|
||||
err = json.Unmarshal([]byte(apiResp), &model)
|
||||
|
||||
err = json.Unmarshal([]byte(rgRaw), rgData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugf("utilityResgroupCheckPresence: traversing decoded Json of length %d", len(model))
|
||||
for index, item := range model {
|
||||
// match by RG name & account ID
|
||||
if item.Name == rgName.(string) && item.AccountID == d.Get("account_id").(int) {
|
||||
log.Debugf("utilityResgroupCheckPresence: match RG name %s / ID %d, account ID %d at index %d",
|
||||
item.Name, item.ID, item.AccountID, index)
|
||||
|
||||
// not all required information is returned by rg/list API, so we need to initiate one more
|
||||
// call to rg/get to obtain extra data to complete Resource population.
|
||||
// Namely, we need resource quota settings
|
||||
reqValues := &url.Values{}
|
||||
reqValues.Add("rgId", fmt.Sprintf("%d", item.ID))
|
||||
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, reqValues)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return apiResp, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("Cannot find RG name %s owned by account ID %d", rgName, d.Get("account_id").(int))
|
||||
return rgData, nil
|
||||
}
|
||||
|
||||
func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*RecordResourceGroup, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
rgData := &RecordResourceGroup{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
|
||||
if reason, ok := d.GetOk("reason"); ok {
|
||||
urlValues.Add("reason", reason.(string))
|
||||
}
|
||||
|
||||
rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(rgRaw), rgData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rgData, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgAffinityGroupComputesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAffinityGroupCompute, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
listGroupComputes := ListAffinityGroupCompute{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
urlValues.Add("affinityGroup", d.Get("affinity_group").(string))
|
||||
|
||||
listGroupComputesRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupComputesAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal([]byte(listGroupComputesRaw), &listGroupComputes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return listGroupComputes, nil
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgAffinityGroupsGetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]uint64, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
computes := make([]uint64, 0)
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
urlValues.Add("affinityGroup", d.Get("affinity_group").(string))
|
||||
|
||||
computesRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupsGetAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(computesRaw), &computes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return computes, nil
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgAffinityGroupsListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (map[string][]uint64, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
groups := make(map[string][]uint64, 0)
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
|
||||
groupsRaw, err := c.DecortAPICall(ctx, "POST", RgAffinityGroupsListAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(groupsRaw), &groups)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return groups, nil
|
||||
}
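utilityRgAffinityGroupsListCheckPresence returns the affinity groups as a map from group label to compute IDs, which flattenRgListGroups (shown earlier) turns into label/ids pairs for the schema. A hedged sketch of a read function that might connect the two follows; the "affinity_groups" attribute name and the function name are assumptions, not the provider's actual implementation.

```go
package rg

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Illustrative sketch only: wiring the affinity-groups-list utility to its flattener.
func dataSourceRgAffinityGroupsListReadExample(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	groups, err := utilityRgAffinityGroupsListCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	// Each map entry becomes a {label, ids} element in the computed list attribute.
	if err := d.Set("affinity_groups", flattenRgListGroups(groups)); err != nil {
		return diag.FromErr(err)
	}

	d.SetId(strconv.Itoa(d.Get("rg_id").(int)))
	return nil
}
```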
|
||||
30
internal/service/cloudapi/rg/utility_rg_audits.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgAuditsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListAudits, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
|
||||
urlValues := &url.Values{}
|
||||
rgAudits := ListAudits{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
rgAuditsRow, err := c.DecortAPICall(ctx, "POST", RgAuditsAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal([]byte(rgAuditsRow), &rgAudits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rgAudits, nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -43,11 +44,11 @@ import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ResgroupListResp, error) {
|
||||
func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListResourceGroups, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
rgList := ResgroupListResp{}
|
||||
rgList := ListResourceGroups{}
|
||||
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
urlValues.Add("size", strconv.Itoa(size.(int)))
|
||||
|
||||
35
internal/service/cloudapi/rg/utility_rg_list_computes.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgListComputesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListComputes, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
listComputes := ListComputes{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
if reason, ok := d.GetOk("reason"); ok {
|
||||
urlValues.Add("reason", reason.(string))
|
||||
}
|
||||
|
||||
listComputesRaw, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(listComputesRaw), &listComputes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return listComputes, nil
|
||||
}
|
||||
37
internal/service/cloudapi/rg/utility_rg_list_deleted.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListResourceGroups, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
rgList := ListResourceGroups{}
|
||||
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
urlValues.Add("size", strconv.Itoa(size.(int)))
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
urlValues.Add("page", strconv.Itoa(page.(int)))
|
||||
}
|
||||
|
||||
rgListRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListDeletedAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(rgListRaw), &rgList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rgList, nil
|
||||
}
|
||||
32
internal/service/cloudapi/rg/utility_rg_list_lb.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgListLbCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListLB, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
listLb := ListLB{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
|
||||
listLbRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListLbAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(listLbRaw), &listLb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return listLb, nil
|
||||
}
|
||||
32
internal/service/cloudapi/rg/utility_rg_list_pfw.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package rg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/rudecs/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityRgListPfwCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (ListPFW, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
urlValues := &url.Values{}
|
||||
|
||||
listPfw := ListPFW{}
|
||||
|
||||
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
|
||||
|
||||
listPfwRaw, err := c.DecortAPICall(ctx, "POST", ResgroupListPfwAPI, urlValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(listPfwRaw), &listPfw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return listPfw, nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff