gos_tech_4.4.3
stSolo 2 years ago
parent a355247845
commit 4d865ae921

@ -2,7 +2,7 @@ name: Release
on: on:
push: push:
tags: tags:
- 'v*' - '*'
jobs: jobs:
release: release:

@ -1,10 +1,51 @@
### Version 3.3.1 ### Version 3.4.0
### Bug fixes
- Fix bug with sample decort_k8s_wg
- Fix bug with the absence of an optional field ext_ip_addr with vins resource
### Features ### Features
- Change cloud_init in vins resource, the field shows full information about the configuration made by the user - Add "seps" to the data source decort_account
- Add "seps" to the resource decort_account
- Add "shareable" to the data source decort_account_disk_list
- Change "compute_id" and "compute_name" to "computes" in data source decort_disks
- Change "compute_id" and "compute_name" to "computes" in resource decort_disks
- Change "compute_id" and "compute_name" to "computes" in data source decort_disks_list
- Add "shareable" to the data source decort_disk
- Add "shareable" to the resource decort_disk
- Add "shareable" to the data source decort_disk_list
- Add optional field "share" to the resource decort_account_disk_list
- Add "present_to" to the data source decort_disk
- Add "present_to" to the data source decort_disk_list
- Add "present_to" to the resource decort_disk
- Add "shareable" to the data source decort_kvmvm
- Add "shareable" to the resource decort_kvmvm
- Add "size_max" to the data source decort_kvmvm
- Add "size_max" to the resource decort_kvmvm
- Add "size_used" to the data source decort_kvmvm
- Add "size_used" to the resource decort_kvmvm
- Add "present_to" to the data source decort_image
- Add "present_to" to the resource decort_image
- Add optional field "labels" to the resource decort_k8s
- Add optional field "taints" to the resource decort_k8s
- Add optional field "annotations" to the resource decort_k8s
- Add optional field "sep_id" in block masters in the resource decort_k8s
- Add optional field "sep_pool" in block masters in the resource decort_k8s
- Add optional field "sep_id" in block workers in the resource decort_k8s
- Add optional field "sep_pool" in block workers in the resource decort_k8s
- Add "gid" to the data source decort_resgroup
- Add "resources" to the data source decort_resgroup
- Add "status" to the data source decort_resgroup
- Add "vins" to the data source decort_resgroup
- Add "vms" to the data source decort_resgroup
- Add "gid" to the resource decort_resgroup
- Add "resources" to the resource decort_resgroup
- Add "status" to the resource decort_resgroup
- Add "vins" to the resource decort_resgroup
- Add "vms" to the resource decort_resgroup
- Add optional field "force" to the resource decort_resgroup
- Add optional field "permanently" to the resource decort_resgroup
- Add optional field "reason" to the resource decort_resgroup
- Add "resources" to the resource decort_resgroup
- Add "gid" to the resource decort_resgroup
- Add "resources" to the data source decort_resgroup
- Add "gid" to the data source decort_resgroup
- Change required field "account_id" to optional in data source decort_resgroup

@ -1,10 +0,0 @@
# Build a ready-to-run Terraform image with the locally built DECORT provider
# pre-installed as a filesystem mirror plugin, so `terraform init` needs no
# network access to a registry.
FROM docker.io/hashicorp/terraform:latest
# Staging directory holding a minimal provider.tf plus the plugin binary;
# the entrypoint later copies this pre-initialized state into the user's /tf.
WORKDIR /opt/decort/tf/
COPY provider.tf ./
# NOTE(review): the plugin path pins provider version 3.3.1 — this must match
# the version declared in provider.tf, or `terraform init` will fail.
COPY terraform-provider-decort ./terraform.d/plugins/digitalenergy.online/decort/decort/3.3.1/linux_amd64/
# Initialize against the local mirror at build time so runtime init is instant.
RUN terraform init
# /tf is the working directory users mount their configurations into.
WORKDIR /tf
COPY entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh", "/bin/terraform"]

@ -1,52 +0,0 @@
// Declarative Jenkins pipeline: runs OWASP Dependency-Check and SonarQube
// static analysis for the provider inside a throwaway Kubernetes pod.
pipeline {
agent {
kubernetes {
// Single long-lived alpine container; `sleep infinity` keeps it alive
// so later stages can exec into it with container('alpine').
yaml '''
apiVersion: v1
kind: Pod
spec:
containers:
- name: alpine
image: alpine:3.15
command:
- sleep
- infinity
'''
}
}
stages {
stage('Dependency check') {
environment {
// Injects DEPCHECKDB_USR / DEPCHECKDB_PSW from the Jenkins credential store.
DEPCHECKDB = credentials('depcheck-postgres')
}
steps {
container('alpine') {
// Dependency-Check needs a JRE; the JDBC jar backs the shared CVE
// database; `go` lets the experimental Go analyzers resolve modules.
sh 'apk update && apk add openjdk11 java-postgresql-jdbc go'
// -n skips the CVE feed update (the shared Postgres DB is maintained
// elsewhere); --enableExperimental activates the Go scanners.
dependencyCheck additionalArguments: '-f JSON -f HTML -n --enableExperimental \
-l deplog \
--dbDriverName org.postgresql.Driver \
--dbDriverPath /usr/share/java/postgresql-jdbc.jar \
--dbUser $DEPCHECKDB_USR \
--dbPassword $DEPCHECKDB_PSW \
--connectionString jdbc:postgresql://postgres-postgresql.postgres/depcheck', odcInstallation: 'depcheck'
// Surface the analyzer log in the build console for debugging.
sh 'cat deplog'
}
}
}
stage('SonarQube analysis') {
environment {
// Resolves the scanner CLI configured as a Jenkins global tool.
SONARSCANNER_HOME = tool 'sonarscanner'
}
steps {
// Wraps the scanner with server URL/token from the 'sonarqube' config.
withSonarQubeEnv('sonarqube') {
sh '$SONARSCANNER_HOME/bin/sonar-scanner'
}
}
}
stage('SonarQube quality gate') {
steps {
// Blocks until SonarQube posts the webhook verdict; a failed gate
// aborts the whole build rather than merely marking it unstable.
waitForQualityGate webhookSecretId: 'sonar-webhook', abortPipeline: true
}
}
}
}

@ -2,12 +2,15 @@
Terraform provider для платформы Digital Energy Cloud Orchestration Technology (DECORT) Terraform provider для платформы Digital Energy Cloud Orchestration Technology (DECORT)
Внимание: провайдер версии 3.x разработан для DECORT API 3.8.x. ## Соответствие версий платформы версиям провайдера
Для более старых версий можно использовать:
| Версия DECORT API | Версия провайдера Terraform |
- DECORT API 3.7.x - версия провайдера rc-1.25 | ------ | ------ |
- DECORT API 3.6.x - версия провайдера rc-1.10 | 3.8.5 | 3.4.x |
- DECORT API до 3.6.0 - terraform DECS provider (https://github.com/rudecs/terraform-provider-decs) | 3.8.0 - 3.8.4 | 3.3.1 |
| 3.7.x | rc-1.25 |
| 3.6.x | rc-1.10 |
| до 3.6.0 | [terraform-provider-decs](https://github.com/rudecs/terraform-provider-decs) |
## Режимы работы ## Режимы работы

@ -2,11 +2,15 @@
Terraform provider for Digital Energy Cloud Orchestration Technology (DECORT) platform Terraform provider for Digital Energy Cloud Orchestration Technology (DECORT) platform
NOTE: provider 3.x is designed for DECORT API 3.8.x. For older API versions please use: ## Mapping of platform versions with provider versions
- DECORT API 3.7.x versions - provider verion rc-1.25 | DECORT API version | Terraform provider version |
- DECORT API 3.6.x versions - provider version rc-1.10 | ------ | ------ |
- DECORT API versions prior to 3.6.0 - Terraform DECS provider (https://github.com/rudecs/terraform-provider-decs) | 3.8.5 | 3.4.x |
| 3.8.0 - 3.8.4 | 3.3.1 |
| 3.7.x | rc-1.25 |
| 3.6.x | rc-1.10 |
| prior to 3.6.0 | [terraform-provider-decs](https://github.com/rudecs/terraform-provider-decs) |
## Working modes ## Working modes

@ -1,4 +0,0 @@
#!/bin/sh
# Entrypoint: seed the mounted working directory with the pre-initialized
# Terraform state (provider.tf, .terraform/, plugin mirror), then exec the
# real command (terraform) so it becomes PID 1 and receives signals.
set -e
# Copy the entire staging dir contents, including dotfiles such as
# .terraform/. Using 'dir/.' avoids the 'dir/.*' glob, which on some shells
# matches '.' and '..' and makes cp fail or copy the wrong tree.
cp -aL /opt/decort/tf/. ./
exec "$@"

@ -30,6 +30,7 @@ builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/ */
//Diagnostics Collector
package dc package dc
import "github.com/hashicorp/terraform-plugin-sdk/v2/diag" import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

@ -141,6 +141,22 @@ func flattenAccResources(r Resources) []map[string]interface{} {
return res return res
} }
// flattenAccountSeps converts the nested SEP usage map (sep id -> data name ->
// ResourceSep) into the flat list-of-maps shape expected by the "seps"
// Terraform schema block. Iteration order follows Go map order and is
// therefore unspecified.
func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} {
	result := make([]map[string]interface{}, 0)
	for sepID, entries := range seps {
		for dataName, usage := range entries {
			result = append(result, map[string]interface{}{
				"sep_id":        sepID,
				"data_name":     dataName,
				"disk_size":     usage.DiskSize,
				"disk_size_max": usage.DiskSizeMax,
			})
		}
	}
	return result
}
func flattenAccResource(r Resource) []map[string]interface{} { func flattenAccResource(r Resource) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{ temp := map[string]interface{}{
@ -150,6 +166,7 @@ func flattenAccResource(r Resource) []map[string]interface{} {
"exttraffic": r.Exttraffic, "exttraffic": r.Exttraffic,
"gpu": r.GPU, "gpu": r.GPU,
"ram": r.RAM, "ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
} }
res = append(res, temp) res = append(res, temp)
return res return res
@ -161,6 +178,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
}, },
"dc_location": { "dc_location": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
@ -199,6 +217,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}, },
}, },
}, },
@ -231,6 +273,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}, },
}, },
}, },

@ -47,6 +47,7 @@ func flattenAccountDisksList(adl AccountDisksList) []map[string]interface{} {
"disk_name": ad.Name, "disk_name": ad.Name,
"pool": ad.Pool, "pool": ad.Pool,
"sep_id": ad.SepId, "sep_id": ad.SepId,
"shareable": ad.Shareable,
"size_max": ad.SizeMax, "size_max": ad.SizeMax,
"type": ad.Type, "type": ad.Type,
} }
@ -98,6 +99,10 @@ func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": { "size_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -89,13 +89,19 @@ type AccountCloudApi struct {
type AccountCloudApiList []AccountCloudApi type AccountCloudApiList []AccountCloudApi
// ResourceSep holds per-SEP (Storage Endpoint Provider) disk usage figures
// as returned by the cloud API inside the account "seps" map.
type ResourceSep struct {
DiskSize float64 `json:"disksize"` // currently consumed disk space
DiskSizeMax int `json:"disksizemax"` // configured disk space limit
}
type Resource struct { type Resource struct {
CPU int `json:"cpu"` CPU int `json:"cpu"`
Disksize int `json:"disksize"` Disksize int `json:"disksize"`
Extips int `json:"extips"` Extips int `json:"extips"`
Exttraffic int `json:"exttraffic"` Exttraffic int `json:"exttraffic"`
GPU int `json:"gpu"` GPU int `json:"gpu"`
RAM int `json:"ram"` RAM int `json:"ram"`
SEPs map[string]map[string]ResourceSep `json:"seps"`
} }
type Resources struct { type Resources struct {
@ -147,12 +153,13 @@ type AccountCompute struct {
type AccountComputesList []AccountCompute type AccountComputesList []AccountCompute
type AccountDisk struct { type AccountDisk struct {
ID int `json:"id"` ID int `json:"id"`
Name string `json:"name"` Name string `json:"name"`
Pool string `json:"pool"` Pool string `json:"pool"`
SepId int `json:"sepId"` SepId int `json:"sepId"`
SizeMax int `json:"sizeMax"` Shareable bool `json:"shareable"`
Type string `json:"type"` SizeMax int `json:"sizeMax"`
Type string `json:"type"`
} }
type AccountDisksList []AccountDisk type AccountDisksList []AccountDisk

@ -593,6 +593,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}, },
}, },
}, },
@ -625,6 +649,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}, },
}, },
}, },

@ -47,4 +47,6 @@ const (
disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete" disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete"
disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback" disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
disksShareAPI = "/restmachine/cloudapi/disks/share"
disksUnshareAPI = "/restmachine/cloudapi/disks/unshare"
) )

@ -60,8 +60,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("account_name", disk.AccountName) d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl)) d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition) d.Set("boot_partition", disk.BootPartition)
d.Set("compute_id", disk.ComputeID) d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("compute_name", disk.ComputeName)
d.Set("created_time", disk.CreatedTime) d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime) d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc) d.Set("desc", disk.Desc)
@ -84,6 +83,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("passwd", disk.Passwd) d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot) d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool) d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts) d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime) d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber) d.Set("reality_device_number", disk.RealityDeviceNumber)
@ -93,6 +93,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("role", disk.Role) d.Set("role", disk.Role)
d.Set("sep_id", disk.SepID) d.Set("sep_id", disk.SepID)
d.Set("sep_type", disk.SepType) d.Set("sep_type", disk.SepType)
d.Set("shareable", disk.Shareable)
d.Set("size_max", disk.SizeMax) d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed) d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots)) d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
@ -130,15 +131,21 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Number of disk partitions", Description: "Number of disk partitions",
}, },
"compute_id": { "computes": {
Type: schema.TypeInt, Type: schema.TypeList,
Computed: true, Computed: true,
Description: "Compute ID", Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"compute_name": { "compute_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Compute name", },
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
}, },
"created_time": { "created_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
@ -316,6 +323,13 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Pool for disk location", Description: "Pool for disk location",
}, },
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": { "purge_attempts": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
@ -361,6 +375,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform", Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
}, },
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": { "size_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -42,6 +42,18 @@ import (
"github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/constants"
) )
// flattenDiskComputes turns the disk's computes map (compute id -> compute
// name) into the list-of-maps form used by the "computes" schema block.
// Iteration order follows Go map order and is therefore unspecified.
func flattenDiskComputes(computes map[string]string) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(computes))
	for id, name := range computes {
		entry := map[string]interface{}{
			"compute_id":   id,
			"compute_name": name,
		}
		result = append(result, entry)
	}
	return result
}
func flattenIOTune(iot IOTune) []map[string]interface{} { func flattenIOTune(iot IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{ temp := map[string]interface{}{
@ -72,9 +84,8 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"account_id": disk.AccountID, "account_id": disk.AccountID,
"account_name": disk.AccountName, "account_name": disk.AccountName,
"acl": string(diskAcl), "acl": string(diskAcl),
"computes": flattenDiskComputes(disk.Computes),
"boot_partition": disk.BootPartition, "boot_partition": disk.BootPartition,
"compute_id": disk.ComputeID,
"compute_name": disk.ComputeName,
"created_time": disk.CreatedTime, "created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime, "deleted_time": disk.DeletedTime,
"desc": disk.Desc, "desc": disk.Desc,
@ -99,6 +110,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"passwd": disk.Passwd, "passwd": disk.Passwd,
"pci_slot": disk.PciSlot, "pci_slot": disk.PciSlot,
"pool": disk.Pool, "pool": disk.Pool,
"present_to": disk.PresentTo,
"purge_attempts": disk.PurgeAttempts, "purge_attempts": disk.PurgeAttempts,
"purge_time": disk.PurgeTime, "purge_time": disk.PurgeTime,
"reality_device_number": disk.RealityDeviceNumber, "reality_device_number": disk.RealityDeviceNumber,
@ -108,6 +120,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"role": disk.Role, "role": disk.Role,
"sep_id": disk.SepID, "sep_id": disk.SepID,
"sep_type": disk.SepType, "sep_type": disk.SepType,
"shareable": disk.Shareable,
"size_max": disk.SizeMax, "size_max": disk.SizeMax,
"size_used": disk.SizeUsed, "size_used": disk.SizeUsed,
"snapshots": flattenDiskSnapshotList(disk.Snapshots), "snapshots": flattenDiskSnapshotList(disk.Snapshots),
@ -199,15 +212,21 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Number of disk partitions", Description: "Number of disk partitions",
}, },
"compute_id": { "computes": {
Type: schema.TypeInt, Type: schema.TypeList,
Computed: true, Computed: true,
Description: "Compute ID", Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"compute_name": { "compute_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Compute name", },
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
}, },
"created_time": { "created_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
@ -400,6 +419,13 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Pool for disk location", Description: "Pool for disk location",
}, },
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": { "purge_attempts": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
@ -445,6 +471,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform", Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
}, },
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": { "size_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,

@ -37,9 +37,8 @@ type Disk struct {
AccountID int `json:"accountId"` AccountID int `json:"accountId"`
AccountName string `json:"accountName"` AccountName string `json:"accountName"`
BootPartition int `json:"bootPartition"` BootPartition int `json:"bootPartition"`
Computes map[string]string `json:"computes"`
CreatedTime uint64 `json:"creationTime"` CreatedTime uint64 `json:"creationTime"`
ComputeID int `json:"computeId"`
ComputeName string `json:"computeName"`
DeletedTime uint64 `json:"deletionTime"` DeletedTime uint64 `json:"deletionTime"`
DeviceName string `json:"devicename"` DeviceName string `json:"devicename"`
Desc string `json:"desc"` Desc string `json:"desc"`
@ -63,6 +62,7 @@ type Disk struct {
ParentId int `json:"parentId"` ParentId int `json:"parentId"`
PciSlot int `json:"pciSlot"` PciSlot int `json:"pciSlot"`
Pool string `json:"pool"` Pool string `json:"pool"`
PresentTo []int `json:"presentTo"`
PurgeTime uint64 `json:"purgeTime"` PurgeTime uint64 `json:"purgeTime"`
PurgeAttempts uint64 `json:"purgeAttempts"` PurgeAttempts uint64 `json:"purgeAttempts"`
RealityDeviceNumber int `json:"realityDeviceNumber"` RealityDeviceNumber int `json:"realityDeviceNumber"`
@ -71,6 +71,7 @@ type Disk struct {
ResName string `json:"resName"` ResName string `json:"resName"`
Role string `json:"role"` Role string `json:"role"`
SepType string `json:"sepType"` SepType string `json:"sepType"`
Shareable bool `json:"shareable"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output SepID int `json:"sepId"` // NOTE: absent from compute/get output
SizeMax int `json:"sizeMax"` SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space

@ -113,6 +113,15 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{} urlValues = &url.Values{}
} }
if shareable := d.Get("shareable"); shareable.(bool) == true {
urlValues.Add("diskId", diskId)
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
dgn := resourceDiskRead(ctx, d, m) dgn := resourceDiskRead(ctx, d, m)
if dgn != nil { if dgn != nil {
return dgn return dgn
@ -167,8 +176,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("account_name", disk.AccountName) d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl)) d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition) d.Set("boot_partition", disk.BootPartition)
d.Set("compute_id", disk.ComputeID) d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("compute_name", disk.ComputeName)
d.Set("created_time", disk.CreatedTime) d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime) d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc) d.Set("desc", disk.Desc)
@ -191,6 +199,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("passwd", disk.Passwd) d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot) d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool) d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts) d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime) d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber) d.Set("reality_device_number", disk.RealityDeviceNumber)
@ -202,6 +211,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("sep_type", disk.SepType) d.Set("sep_type", disk.SepType)
d.Set("size_max", disk.SizeMax) d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed) d.Set("size_used", disk.SizeUsed)
d.Set("shareable", disk.Shareable)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots)) d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status) d.Set("status", disk.Status)
d.Set("tech_status", disk.TechStatus) d.Set("tech_status", disk.TechStatus)
@ -277,6 +287,24 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{} urlValues = &url.Values{}
} }
if d.HasChange("shareable") {
oldShare, newShare := d.GetChange("shareable")
urlValues = &url.Values{}
urlValues.Add("diskId", d.Id())
if oldShare.(bool) == false && newShare.(bool) == true {
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
}
if oldShare.(bool) == true && newShare.(bool) == false {
_, err := c.DecortAPICall(ctx, "POST", disksUnshareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
}
}
return resourceDiskRead(ctx, d, m) return resourceDiskRead(ctx, d, m)
} }
@ -335,6 +363,13 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Pool for disk location", Description: "Pool for disk location",
}, },
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"sep_id": { "sep_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
@ -354,7 +389,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
}, },
"detach": { "detach": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
@ -373,6 +407,12 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: "", Default: "",
Description: "Reason for deletion", Description: "Reason for deletion",
}, },
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Default: false,
},
"disk_id": { "disk_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
@ -393,15 +433,21 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true, Computed: true,
Description: "Number of disk partitions", Description: "Number of disk partitions",
}, },
"compute_id": { "computes": {
Type: schema.TypeInt, Type: schema.TypeList,
Computed: true, Computed: true,
Description: "Compute ID", Elem: &schema.Resource{
}, Schema: map[string]*schema.Schema{
"compute_name": { "compute_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Compute name", },
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
}, },
"created_time": { "created_time": {
Type: schema.TypeInt, Type: schema.TypeInt,

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -80,6 +81,7 @@ func flattenImage(d *schema.ResourceData, img *ImageExtend) {
d.Set("pool_name", img.Pool) d.Set("pool_name", img.Pool)
d.Set("provider_name", img.ProviderName) d.Set("provider_name", img.ProviderName)
d.Set("purge_attempts", img.PurgeAttempts) d.Set("purge_attempts", img.PurgeAttempts)
d.Set("present_to", img.PresentTo)
d.Set("res_id", img.ResId) d.Set("res_id", img.ResId)
d.Set("rescuecd", img.RescueCD) d.Set("rescuecd", img.RescueCD)
d.Set("sep_id", img.SepId) d.Set("sep_id", img.SepId)

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -161,6 +162,13 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"res_id": { "res_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -134,6 +135,7 @@ type ImageExtend struct {
Password string `json:"password"` Password string `json:"password"`
Pool string `json:"pool"` Pool string `json:"pool"`
ProviderName string `json:"provider_name"` ProviderName string `json:"provider_name"`
PresentTo []int `json:"presentTo"`
PurgeAttempts int `json:"purgeAttempts"` PurgeAttempts int `json:"purgeAttempts"`
ResId string `json:"resId"` ResId string `json:"resId"`
RescueCD bool `json:"rescuecd"` RescueCD bool `json:"rescuecd"`

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -49,6 +49,8 @@ type K8sNodeRecord struct {
ID int `json:"id"` ID int `json:"id"`
Name string `json:"name"` Name string `json:"name"`
} `json:"detailedInfo"` } `json:"detailedInfo"`
SepID int `json:"SepId"`
SepPool string `json:"SepPool"`
} }
//K8sRecord represents k8s instance //K8sRecord represents k8s instance

@ -56,10 +56,12 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
node := nodeList[0].(map[string]interface{}) node := nodeList[0].(map[string]interface{})
return K8sNodeRecord{ return K8sNodeRecord{
Num: node["num"].(int), Num: node["num"].(int),
Cpu: node["cpu"].(int), Cpu: node["cpu"].(int),
Ram: node["ram"].(int), Ram: node["ram"].(int),
Disk: node["disk"].(int), Disk: node["disk"].(int),
SepID: node["sep_id"].(int),
SepPool: node["sep_pool"].(string),
} }
} }
@ -112,6 +114,14 @@ func mastersSchemaMake() map[string]*schema.Schema {
Required: true, Required: true,
Description: "Number of nodes to create.", Description: "Number of nodes to create.",
} }
masters["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
masters["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
masters["cpu"] = &schema.Schema{ masters["cpu"] = &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
@ -140,6 +150,14 @@ func workersSchemaMake() map[string]*schema.Schema {
Required: true, Required: true,
Description: "Number of nodes to create.", Description: "Number of nodes to create.",
} }
workers["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
workers["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
workers["cpu"] = &schema.Schema{ workers["cpu"] = &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,

@ -69,6 +69,8 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
urlValues.Add("masterCpu", strconv.Itoa(masterNode.Cpu)) urlValues.Add("masterCpu", strconv.Itoa(masterNode.Cpu))
urlValues.Add("masterRam", strconv.Itoa(masterNode.Ram)) urlValues.Add("masterRam", strconv.Itoa(masterNode.Ram))
urlValues.Add("masterDisk", strconv.Itoa(masterNode.Disk)) urlValues.Add("masterDisk", strconv.Itoa(masterNode.Disk))
urlValues.Add("masterSepId", strconv.Itoa(masterNode.SepID))
urlValues.Add("masterSepPool", masterNode.SepPool)
var workerNode K8sNodeRecord var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok { if workers, ok := d.GetOk("workers"); ok {
@ -80,6 +82,29 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
urlValues.Add("workerCpu", strconv.Itoa(workerNode.Cpu)) urlValues.Add("workerCpu", strconv.Itoa(workerNode.Cpu))
urlValues.Add("workerRam", strconv.Itoa(workerNode.Ram)) urlValues.Add("workerRam", strconv.Itoa(workerNode.Ram))
urlValues.Add("workerDisk", strconv.Itoa(workerNode.Disk)) urlValues.Add("workerDisk", strconv.Itoa(workerNode.Disk))
urlValues.Add("workerSepId", strconv.Itoa(workerNode.SepID))
urlValues.Add("workerSepPool", workerNode.SepPool)
if labels, ok := d.GetOk("labels"); ok {
labels := labels.([]interface{})
for _, label := range labels {
urlValues.Add("labels", label.(string))
}
}
if taints, ok := d.GetOk("taints"); ok {
taints := taints.([]interface{})
for _, taint := range taints {
urlValues.Add("taints", taint.(string))
}
}
if annotations, ok := d.GetOk("annotations"); ok {
annotations := annotations.([]interface{})
for _, annotation := range annotations {
urlValues.Add("annotations", annotation.(string))
}
}
if withLB, ok := d.GetOk("with_lb"); ok { if withLB, ok := d.GetOk("with_lb"); ok {
urlValues.Add("withLB", strconv.FormatBool(withLB.(bool))) urlValues.Add("withLB", strconv.FormatBool(withLB.(bool)))
@ -133,7 +158,6 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
} }
func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
//log.Debugf("resourceK8sRead: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
k8s, err := utilityDataK8sCheckPresence(ctx, d, m) k8s, err := utilityDataK8sCheckPresence(ctx, d, m)
if err != nil { if err != nil {
@ -282,28 +306,45 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Required: true, Required: true,
Description: "Name of the cluster.", Description: "Name of the cluster.",
}, },
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, ForceNew: true,
Description: "Resource group ID that this instance belongs to.", Description: "Resource group ID that this instance belongs to.",
}, },
"k8sci_id": { "k8sci_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true, ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.", Description: "ID of the k8s catalog item to base this instance on.",
}, },
"wg_name": { "wg_name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ForceNew: true, ForceNew: true,
Description: "Name for first worker group created with cluster.", Description: "Name for first worker group created with cluster.",
}, },
"labels": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"annotations": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"masters": { "masters": {
Type: schema.TypeList, Type: schema.TypeList,
Optional: true, Optional: true,
@ -315,7 +356,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
}, },
Description: "Master node(s) configuration.", Description: "Master node(s) configuration.",
}, },
"workers": { "workers": {
Type: schema.TypeList, Type: schema.TypeList,
Optional: true, Optional: true,
@ -326,7 +366,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
}, },
Description: "Worker node(s) configuration.", Description: "Worker node(s) configuration.",
}, },
"with_lb": { "with_lb": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
@ -334,7 +373,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Default: true, Default: true,
Description: "Create k8s with load balancer if true.", Description: "Create k8s with load balancer if true.",
}, },
"extnet_id": { "extnet_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
@ -342,7 +380,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
ForceNew: true, ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.", Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
}, },
"desc": { "desc": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
@ -417,13 +454,11 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"default_wg_id": { "default_wg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "ID of default workers group for this instace.", Description: "ID of default workers group for this instace.",
}, },
"kubeconfig": { "kubeconfig": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,

@ -45,7 +45,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation" // "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
) )
@ -125,10 +124,10 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool {
return false return false
} }
func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} { func flattenDataComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0)
for _, disk := range disksList { for _, disk := range disksList {
if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks if findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks
continue continue
} }
temp := map[string]interface{}{ temp := map[string]interface{}{
@ -136,6 +135,9 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) [
"disk_id": disk.ID, "disk_id": disk.ID,
"disk_type": disk.Type, "disk_type": disk.Type,
"sep_id": disk.SepID, "sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool, "pool": disk.Pool,
"desc": disk.Desc, "desc": disk.Desc,
"image_id": disk.ImageID, "image_id": disk.ImageID,
@ -146,7 +148,7 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) [
return res return res
} }
func flattenCompute(d *schema.ResourceData, compFacts string) error { func flattenDataCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that compFacts string contains response from API compute/get, // This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance. // i.e. detailed information about compute instance.
// //
@ -219,7 +221,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
} }
} }
err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List())) err = d.Set("disks", flattenDataComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
if err != nil { if err != nil {
return err return err
} }
@ -236,7 +238,7 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err) return diag.FromErr(err)
} }
if err = flattenCompute(d, compFacts); err != nil { if err = flattenDataCompute(d, compFacts); err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -353,48 +355,53 @@ func DataSourceCompute() *schema.Resource {
"disks": { "disks": {
Type: schema.TypeList, Type: schema.TypeList,
Computed: true, Computed: true,
Optional: true,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"disk_name": { "disk_name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Computed: true,
Description: "Name for disk", Description: "Name for disk",
}, },
"size": { "size": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Computed: true,
Description: "Disk size in GiB", Description: "Disk size in GiB",
}, },
"disk_type": { "disk_type": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Optional: true, Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
}, },
"sep_id": { "sep_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Optional: true,
Description: "Storage endpoint provider ID; by default the same with boot disk", Description: "Storage endpoint provider ID; by default the same with boot disk",
}, },
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeInt,
Computed: true,
},
"pool": { "pool": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Optional: true,
Description: "Pool name; by default will be chosen automatically", Description: "Pool name; by default will be chosen automatically",
}, },
"desc": { "desc": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Optional: true,
Description: "Optional description", Description: "Optional description",
}, },
"image_id": { "image_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Optional: true,
Description: "Specify image id for create disk from template", Description: "Specify image id for create disk from template",
}, },
"disk_id": { "disk_id": {
@ -404,8 +411,7 @@ func DataSourceCompute() *schema.Resource {
}, },
"permanently": { "permanently": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Computed: true,
Default: false,
Description: "Disk deletion status", Description: "Disk deletion status",
}, },
}, },

@ -0,0 +1,115 @@
package kvmvm
import (
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus"
)
// flattenComputeDisksDemo converts a compute's disk records into the flat
// list-of-maps form expected by the "disks" schema attribute. The main boot
// disk and any disks already tracked via "extra_disks" are left out, since
// those are managed by other schema attributes.
func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(disksList))
	for _, rec := range disksList {
		// Skip the main bootdisk and every disk attached through extra_disks.
		skip := rec.Name == "bootdisk" || findInExtraDisks(rec.ID, extraDisks)
		if skip {
			continue
		}
		result = append(result, map[string]interface{}{
			"disk_name": rec.Name,
			"disk_id":   rec.ID,
			"disk_type": rec.Type,
			"sep_id":    rec.SepID,
			"shareable": rec.Shareable,
			"size_max":  rec.SizeMax,
			"size_used": rec.SizeUsed,
			"pool":      rec.Pool,
			"desc":      rec.Desc,
			"image_id":  rec.ImageID,
			"size":      rec.SizeMax,
		})
	}
	return result
}
// flattenCompute decodes a compute/get API response (JSON in compFacts) and
// writes the decoded fields into the resource's schema data.
//
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
func flattenCompute(d *schema.ResourceData, compFacts string) error {
	model := ComputeGetResp{}
	log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts)
	err := json.Unmarshal([]byte(compFacts), &model)
	if err != nil {
		return err
	}

	log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)

	d.SetId(fmt.Sprintf("%d", model.ID))
	// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't
	d.Set("name", model.Name)
	d.Set("rg_id", model.RgID)
	d.Set("rg_name", model.RgName)
	d.Set("account_id", model.AccountID)
	d.Set("account_name", model.AccountName)
	d.Set("driver", model.Driver)
	d.Set("cpu", model.Cpu)
	d.Set("ram", model.Ram)
	// d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way
	// A non-zero VirtualImageID takes precedence over the base ImageID.
	if model.VirtualImageID != 0 {
		d.Set("image_id", model.VirtualImageID)
	} else {
		d.Set("image_id", model.ImageID)
	}
	d.Set("description", model.Desc)
	// "enabled" is derived from the status reported by the platform.
	d.Set("enabled", false)
	if model.Status == status.Enabled {
		d.Set("enabled", true)
	}
	//d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress function
	//d.Set("status", model.Status)
	//d.Set("tech_status", model.TechStatus)
	// "started" is derived from the tech status reported by the platform.
	d.Set("started", false)
	if model.TechStatus == "STARTED" {
		d.Set("started", true)
	}

	// Boot disk attributes are taken from the disk list, because the
	// top-level bootdiskSize field of compute/get is always zero (see above).
	bootDisk := findBootDisk(model.Disks)

	d.Set("boot_disk_size", bootDisk.SizeMax)
	d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
	d.Set("sep_id", bootDisk.SepID)
	d.Set("pool", bootDisk.Pool)

	//if len(model.Disks) > 0 {
	//log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
	//if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
	//return err
	//}
	//}

	if len(model.Interfaces) > 0 {
		log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
		if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
			return err
		}
	}

	if len(model.OsUsers) > 0 {
		log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers))
		if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil {
			return err
		}
	}

	// "disks" excludes the boot disk and extra_disks; see flattenComputeDisksDemo.
	err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
	if err != nil {
		return err
	}

	return nil
}

@ -72,6 +72,7 @@ type DiskRecord struct {
Role string `json:"role"` Role string `json:"role"`
SepType string `json:"sepType"` SepType string `json:"sepType"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output SepID int `json:"sepId"` // NOTE: absent from compute/get output
Shareable bool `json:"shareable"`
SizeMax int `json:"sizeMax"` SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
Snapshots []SnapshotRecord `json:"snapshots"` Snapshots []SnapshotRecord `json:"snapshots"`

@ -694,6 +694,18 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true, Optional: true,
Description: "Storage endpoint provider ID; by default the same with boot disk", Description: "Storage endpoint provider ID; by default the same with boot disk",
}, },
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeInt,
Computed: true,
},
"pool": { "pool": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -34,10 +35,11 @@ package rg
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/constants"
log "github.com/sirupsen/logrus" "github.com/rudecs/terraform-provider-decort/internal/controller"
// "net/url" // "net/url"
@ -45,53 +47,31 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
func flattenResgroup(d *schema.ResourceData, rg_facts string) error { func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) {
// NOTE: this function modifies ResourceData argument - as such it should never be called c := m.(*controller.ControllerCfg)
// from resourceRsgroupExists(...) method urlValues := &url.Values{}
// log.Debugf("%s", rg_facts) rgData := &ResgroupGetResp{}
log.Debugf("flattenResgroup: ready to decode response body from API")
details := ResgroupGetResp{} urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
err := json.Unmarshal([]byte(rg_facts), &details) rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
if err != nil { if err != nil {
return err return nil, err
} }
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d", err = json.Unmarshal([]byte(rgRaw), rgData)
details.Name, details.ID, details.AccountID) if err != nil {
return nil, err
d.SetId(fmt.Sprintf("%d", details.ID))
d.Set("rg_id", details.ID)
d.Set("name", details.Name)
d.Set("account_name", details.AccountName)
d.Set("account_id", details.AccountID)
// d.Set("grid_id", details.GridID)
d.Set("description", details.Desc)
d.Set("status", details.Status)
d.Set("def_net_type", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
/*
d.Set("vins", details.Vins)
d.Set("computes", details.Computes)
*/
log.Debugf("flattenResgroup: calling flattenQuota()")
if err = d.Set("quota", parseQuota(details.Quota)); err != nil {
return err
} }
return rgData, nil
return nil
} }
func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) rg, err := utilityDataResgroupCheckPresence(ctx, d, m)
if rg_facts == "" { if err != nil {
// if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty in this case d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err) return diag.FromErr(err)
} }
return diag.FromErr(flattenDataResgroup(d, *rg))
return diag.FromErr(flattenResgroup(d, rg_facts))
} }
func DataSourceResgroup() *schema.Resource { func DataSourceResgroup() *schema.Resource {
@ -126,7 +106,7 @@ func DataSourceResgroup() *schema.Resource {
"account_id": { "account_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Optional: true,
Description: "Unique ID of the account, which this resource group belongs to.", Description: "Unique ID of the account, which this resource group belongs to.",
}, },
@ -135,15 +115,11 @@ func DataSourceResgroup() *schema.Resource {
Computed: true, Computed: true,
Description: "User-defined text description of this resource group.", Description: "User-defined text description of this resource group.",
}, },
"gid": {
/* commented out, as in this version of provider we use default Grid ID
"grid_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Unique ID of the grid, where this resource group is deployed.", Description: "Unique ID of the grid, where this resource group is deployed.",
}, },
*/
"quota": { "quota": {
Type: schema.TypeList, Type: schema.TypeList,
Computed: true, Computed: true,
@ -165,32 +141,150 @@ func DataSourceResgroup() *schema.Resource {
Description: "ID of the default network for this resource group (if any).", Description: "ID of the default network for this resource group (if any).",
}, },
/* "resources": {
"status": { Type: schema.TypeList,
Type: schema.TypeString, Computed: true,
Computed: true, Elem: &schema.Resource{
Description: "Current status of this resource group.", Schema: map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
},
}, },
},
"vins": { "status": {
Type: schema.TypeList, // this is a list of ints Type: schema.TypeString,
Computed: true, Computed: true,
MaxItems: LimitMaxVinsPerResgroup, Description: "Current status of this resource group.",
Elem: &schema.Schema{ },
Type: schema.TypeInt,
}, "vins": {
Description: "List of VINs deployed in this resource group.", Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
}, },
Description: "List of VINs deployed in this resource group.",
},
"computes": { "vms": {
Type: schema.TypeList, //t his is a list of ints Type: schema.TypeList, //t his is a list of ints
Computed: true, Computed: true,
Elem: &schema.Schema{ Elem: &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
}, },
*/ Description: "List of computes deployed in this resource group.",
},
}, },
} }
} }

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -0,0 +1,145 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package rg
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
)
// flattenAccountSeps flattens the nested SEP consumption map
// (SEP id -> data name -> record) into the flat list-of-maps form
// used by the "seps" schema attribute.
func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} {
	result := make([]map[string]interface{}, 0)
	for sepID, byDataName := range seps {
		for dataName, record := range byDataName {
			result = append(result, map[string]interface{}{
				"sep_id":        sepID,
				"data_name":     dataName,
				"disk_size":     record.DiskSize,
				"disk_size_max": record.DiskSizeMax,
			})
		}
	}
	return result
}
// flattenAccResource converts a single Resource record into the
// one-element list-of-maps form expected by the Terraform schema.
func flattenAccResource(r Resource) []map[string]interface{} {
	return []map[string]interface{}{
		{
			"cpu":        r.CPU,
			"disksize":   r.Disksize,
			"extips":     r.Extips,
			"exttraffic": r.Exttraffic,
			"gpu":        r.GPU,
			"ram":        r.RAM,
			"seps":       flattenAccountSeps(r.SEPs),
		},
	}
}
// flattenRgResources converts a resource group's Resources record
// (current and reserved consumption) into the one-element
// list-of-maps form expected by the "resources" schema attribute.
func flattenRgResources(r Resources) []map[string]interface{} {
	return []map[string]interface{}{
		{
			"current":  flattenAccResource(r.Current),
			"reserved": flattenAccResource(r.Reserved),
		},
	}
}
// flattenDataResgroup populates ResourceData from an already-decoded
// resource group API response, for the decort_resgroup data source.
//
// NOTE: this function modifies the ResourceData argument - as such it
// should never be called from resourceRsgroupExists(...).
//
// The data-source and resource flatten logic are identical, so this
// delegates to flattenResgroup rather than maintaining a second copy
// of the same field-by-field mapping.
func flattenDataResgroup(d *schema.ResourceData, details ResgroupGetResp) error {
	return flattenResgroup(d, details)
}
// flattenResgroup populates the Terraform ResourceData of the RG resource
// from a decoded rg/get API response.
//
// NOTE: this function modifies the ResourceData argument - as such it should
// never be called from resourceRsgroupExists(...) method.
//
// Returns an error only if setting the "quota" attribute fails; errors from
// the other d.Set calls on simple scalar/list attributes are ignored.
func flattenResgroup(d *schema.ResourceData, details ResgroupGetResp) error {
	log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
		details.Name, details.ID, details.AccountID)

	d.SetId(fmt.Sprintf("%d", details.ID))
	d.Set("rg_id", details.ID)
	d.Set("name", details.Name)
	d.Set("account_name", details.AccountName)
	d.Set("account_id", details.AccountID)
	d.Set("gid", details.GridID)
	d.Set("description", details.Desc)
	d.Set("status", details.Status)
	d.Set("def_net_type", details.DefaultNetType)
	d.Set("def_net_id", details.DefaultNetID)
	d.Set("resources", flattenRgResources(details.Resources))
	d.Set("vins", details.Vins)
	d.Set("vms", details.Computes)

	log.Debugf("flattenResgroup: calling parseQuota()")
	if err := d.Set("quota", parseQuota(details.Quota)); err != nil {
		return err
	}

	return nil
}

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -91,27 +92,28 @@ type AccountAclRecord struct {
} }
type ResgroupGetResp struct { type ResgroupGetResp struct {
ACLs []UserAclRecord `json:"ACLs"` Resources Resources `json:"Resources"`
Usage UsageRecord `json:"Resources"` ACLs []UserAclRecord `json:"ACLs"`
AccountID int `json:"accountId"` //Usage UsageRecord `json:"Resources"`
AccountName string `json:"accountName"` AccountID int `json:"accountId"`
GridID int `json:"gid"` AccountName string `json:"accountName"`
CreatedBy string `json:"createdBy"` GridID int `json:"gid"`
CreatedTime uint64 `json:"createdTime"` CreatedBy string `json:"createdBy"`
DefaultNetID int `json:"def_net_id"` CreatedTime uint64 `json:"createdTime"`
DefaultNetType string `json:"def_net_type"` DefaultNetID int `json:"def_net_id"`
DeletedBy string `json:"deletedBy"` DefaultNetType string `json:"def_net_type"`
DeletedTime uint64 `json:"deletedTime"` DeletedBy string `json:"deletedBy"`
Desc string `json:"desc"` DeletedTime uint64 `json:"deletedTime"`
ID uint `json:"id"` Desc string `json:"desc"`
LockStatus string `json:"lockStatus"` ID uint `json:"id"`
Name string `json:"name"` LockStatus string `json:"lockStatus"`
Quota QuotaRecord `json:"resourceLimits"` Name string `json:"name"`
Status string `json:"status"` Quota QuotaRecord `json:"resourceLimits"`
UpdatedBy string `json:"updatedBy"` Status string `json:"status"`
UpdatedTime uint64 `json:"updatedTime"` UpdatedBy string `json:"updatedBy"`
Vins []int `json:"vins"` UpdatedTime uint64 `json:"updatedTime"`
Computes []int `json:"vms"` Vins []int `json:"vins"`
Computes []int `json:"vms"`
Ignored map[string]interface{} `json:"-"` Ignored map[string]interface{} `json:"-"`
} }
@ -147,3 +149,23 @@ type UsageRecord struct {
Current ResourceRecord `json:"Current"` Current ResourceRecord `json:"Current"`
Reserved ResourceRecord `json:"Reserved"` Reserved ResourceRecord `json:"Reserved"`
} }
type ResourceSep struct {
DiskSize float64 `json:"disksize"`
DiskSizeMax int `json:"disksizemax"`
}
type Resource struct {
CPU int `json:"cpu"`
Disksize int `json:"disksize"`
Extips int `json:"extips"`
Exttraffic int `json:"exttraffic"`
GPU int `json:"gpu"`
RAM int `json:"ram"`
SEPs map[string]map[string]ResourceSep `json:"seps"`
}
type Resources struct {
Current Resource `json:"Current"`
Reserved Resource `json:"Reserved"`
}

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -33,9 +34,9 @@ package rg
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"net/url" "net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller" "github.com/rudecs/terraform-provider-decort/internal/controller"
@ -133,16 +134,11 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
d.SetId(api_resp) // rg/create API returns ID of the newly creted resource group on success d.SetId(api_resp) // rg/create API returns ID of the newly creted resource group on success
// rg.ID, _ = strconv.Atoi(api_resp) // rg.ID, _ = strconv.Atoi(api_resp)
if !set_quota { if !set_quota {
resp, err := utilityResgroupCheckPresence(ctx, d, m) rg, err := utilityResgroupCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
rg := ResgroupGetResp{}
if err := json.Unmarshal([]byte(resp), &rg); err != nil {
return diag.FromErr(err)
}
d.Set("quota", parseQuota(rg.Quota)) d.Set("quota", parseQuota(rg.Quota))
} }
@ -155,14 +151,14 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
d.Get("name").(string), d.Get("account_id").(int)) d.Get("name").(string), d.Get("account_id").(int))
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) rg_facts, err := utilityResgroupCheckPresence(ctx, d, m)
if rg_facts == "" { if err != nil {
// if empty string is returned from utilityResgroupCheckPresence then there is no // if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party // such resource group and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty d.SetId("") // ensure ID is empty
return diag.FromErr(err) return diag.FromErr(err)
} }
return diag.FromErr(flattenResgroup(d, rg_facts)) return diag.FromErr(flattenResgroup(d, *rg_facts))
} }
func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -272,24 +268,21 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d", log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d",
d.Get("name").(string), d.Get("account_id").(int)) d.Get("name").(string), d.Get("account_id").(int))
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) c := m.(*controller.ControllerCfg)
if rg_facts == "" {
if err != nil {
return diag.FromErr(err)
}
// the target RG does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
}
url_values := &url.Values{} url_values := &url.Values{}
url_values.Add("rgId", d.Id()) url_values.Add("rgId", d.Id())
url_values.Add("force", "1") if force, ok := d.GetOk("force"); ok {
url_values.Add("permanently", "1") url_values.Add("force", strconv.FormatBool(force.(bool)))
url_values.Add("reason", "Destroyed by DECORT Terraform provider") }
if permanently, ok := d.GetOk("permanently"); ok {
url_values.Add("permanently", strconv.FormatBool(permanently.(bool)))
}
if reason, ok := d.GetOk("reason"); ok {
url_values.Add("reason", reason.(string))
}
c := m.(*controller.ControllerCfg) _, err := c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, url_values)
_, err = c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, url_values)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -297,134 +290,278 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
return nil return nil
} }
func ResourceResgroup() *schema.Resource { func ResourceRgSchemaMake() map[string]*schema.Schema {
return &schema.Resource{ return map[string]*schema.Schema{
SchemaVersion: 1, "account_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Unique ID of the account, which this resource group belongs to.",
},
CreateContext: resourceResgroupCreate, "gid": {
ReadContext: resourceResgroupRead, Type: schema.TypeInt,
UpdateContext: resourceResgroupUpdate, Required: true,
DeleteContext: resourceResgroupDelete, ForceNew: true, // change of Grid ID will require new RG
Description: "Unique ID of the grid, where this resource group is deployed.",
},
Importer: &schema.ResourceImporter{ "name": {
StateContext: schema.ImportStatePassthroughContext, Type: schema.TypeString,
Required: true,
Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.",
}, },
Timeouts: &schema.ResourceTimeout{ "def_net_type": {
Create: &constants.Timeout600s, Type: schema.TypeString,
Read: &constants.Timeout300s, Optional: true,
Update: &constants.Timeout300s, Default: "PRIVATE",
Delete: &constants.Timeout300s, ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Default: &constants.Timeout300s, Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
}, },
Schema: map[string]*schema.Schema{ "def_net_id": {
"name": { Type: schema.TypeInt,
Type: schema.TypeString, Computed: true,
Required: true, Description: "ID of the default network for this resource group (if any).",
Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.", },
},
"account_id": { "ipcidr": {
Type: schema.TypeInt, Type: schema.TypeString,
Required: true, Optional: true,
ValidateFunc: validation.IntAtLeast(1), Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net_type=PRIVATE",
Description: "Unique ID of the account, which this resource group belongs to.", },
},
"def_net_type": { "ext_net_id": {
Type: schema.TypeString, Type: schema.TypeInt,
Optional: true, Optional: true,
Default: "PRIVATE", Default: 0,
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE",
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.", },
},
"def_net_id": { "ext_ip": {
Type: schema.TypeInt, Type: schema.TypeString,
Computed: true, Optional: true,
Description: "ID of the default network for this resource group (if any).", Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0",
}, },
"ipcidr": { "quota": {
Type: schema.TypeString, Type: schema.TypeList,
Optional: true, Optional: true,
Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net_type=PRIVATE", Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: quotaRgSubresourceSchemaMake(),
}, },
Description: "Quota settings for this resource group.",
},
"ext_net_id": { "description": {
Type: schema.TypeInt, Type: schema.TypeString,
Optional: true, Optional: true,
Default: 0, Description: "User-defined text description of this resource group.",
Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE", },
}, "force": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Set to True if you want force delete non-empty RG",
},
"ext_ip": { "account_name": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Computed: true,
Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0", Description: "Name of the account, which this resource group belongs to.",
}, },
/* commented out, as in this version of provider we use default Grid ID "resources": {
"grid_id": { Type: schema.TypeList,
Type: schema.TypeInt, Computed: true,
Optional: true, Elem: &schema.Resource{
Default: 0, // if 0 is passed, default Grid ID will be used Schema: map[string]*schema.Schema{
// DefaultFunc: utilityResgroupGetDefaultGridID, "current": {
ForceNew: true, // change of Grid ID will require new RG Type: schema.TypeList,
Description: "Unique ID of the grid, where this resource group is deployed.", Computed: true,
}, Elem: &schema.Resource{
*/ Schema: map[string]*schema.Schema{
"cpu": {
"quota": { Type: schema.TypeInt,
Type: schema.TypeList, Computed: true,
Optional: true, },
Computed: true, "disksize": {
MaxItems: 1, Type: schema.TypeInt,
Elem: &schema.Resource{ Computed: true,
Schema: quotaRgSubresourceSchemaMake(), },
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
}, },
Description: "Quota settings for this resource group.",
}, },
},
"description": { "status": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Computed: true,
Description: "User-defined text description of this resource group.", Description: "Current status of this resource group.",
},
"vins": {
Type: schema.TypeList, //this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
}, },
Description: "List of VINs deployed in this resource group.",
},
"account_name": { "vms": {
Type: schema.TypeString, Type: schema.TypeList, //t his is a list of ints
Computed: true, Computed: true,
Description: "Name of the account, which this resource group belongs to.", Elem: &schema.Schema{
Type: schema.TypeInt,
}, },
Description: "List of computes deployed in this resource group.",
},
/* "computes": {
"status": { Type: schema.TypeList, //this is a list of ints
Type: schema.TypeString, Computed: true,
Computed: true, Elem: &schema.Schema{
Description: "Current status of this resource group.", Type: schema.TypeInt,
}, },
Description: "List of computes deployed in this resource group.",
},
}
}
"vins": { func ResourceResgroup() *schema.Resource {
Type: schema.TypeList, // this is a list of ints return &schema.Resource{
Computed: true, SchemaVersion: 1,
MaxItems: LimitMaxVinsPerResgroup,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of VINs deployed in this resource group.",
},
"computes": { CreateContext: resourceResgroupCreate,
Type: schema.TypeList, // this is a list of ints ReadContext: resourceResgroupRead,
Computed: true, UpdateContext: resourceResgroupUpdate,
Elem: &schema.Schema{ DeleteContext: resourceResgroupDelete,
Type: schema.TypeInt,
}, Importer: &schema.ResourceImporter{
Description: "List of computes deployed in this resource group.", StateContext: schema.ImportStatePassthroughContext,
}, },
*/
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
}, },
Schema: ResourceRgSchemaMake(),
} }
} }

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -34,19 +35,17 @@ package rg
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"net/url" "net/url"
"strconv" "strconv"
"github.com/rudecs/terraform-provider-decort/internal/controller" "github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
// On success this function returns a string, as returned by API rg/get, which could be unmarshalled // On success this function returns a string, as returned by API rg/get, which could be unmarshalled
// into ResgroupGetResp structure // into ResgroupGetResp structure
func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) {
// This function tries to locate resource group by one of the following algorithms depending // This function tries to locate resource group by one of the following algorithms depending
// on the parameters passed: // on the parameters passed:
// - if resource group ID is specified -> by RG ID // - if resource group ID is specified -> by RG ID
@ -67,73 +66,21 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
urlValues := &url.Values{} urlValues := &url.Values{}
// make it possible to use "read" & "check presence" functions with RG ID set so if d.Id() != "" {
// that Import of RG resource is possible urlValues.Add("rgId", d.Id())
idSet := false
theId, err := strconv.Atoi(d.Id())
if err != nil || theId <= 0 {
rgId, argSet := d.GetOk("rg_id")
if argSet {
theId = rgId.(int)
idSet = true
}
} else { } else {
idSet = true urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
} }
if idSet { rgData := &ResgroupGetResp{}
// go straight for the RG by its ID rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
log.Debugf("utilityResgroupCheckPresence: locating RG by its ID %d", theId)
urlValues.Add("rgId", fmt.Sprintf("%d", theId))
rgFacts, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
if err != nil {
return "", err
}
return rgFacts, nil
}
rgName, argSet := d.GetOk("name")
if !argSet {
// no RG ID and no RG name - we cannot locate resource group in this case
return "", fmt.Errorf("Cannot check resource group presence if name is empty and no resource group ID specified")
}
// Valid account ID is required to locate a resource group
// obtain Account ID by account name - it should not be zero on success
urlValues.Add("includedeleted", "false")
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupListAPI, urlValues)
if err != nil { if err != nil {
return "", err return nil, err
}
// log.Debugf("%s", apiResp)
log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %s", ResgroupListAPI)
model := ResgroupListResp{}
err = json.Unmarshal([]byte(apiResp), &model)
if err != nil {
return "", err
} }
log.Debugf("utilityResgroupCheckPresence: traversing decoded Json of length %d", len(model)) err = json.Unmarshal([]byte(rgRaw), rgData)
for index, item := range model { if err != nil {
// match by RG name & account ID return nil, err
if item.Name == rgName.(string) && item.AccountID == d.Get("account_id").(int) {
log.Debugf("utilityResgroupCheckPresence: match RG name %s / ID %d, account ID %d at index %d",
item.Name, item.ID, item.AccountID, index)
// not all required information is returned by rg/list API, so we need to initiate one more
// call to rg/get to obtain extra data to complete Resource population.
// Namely, we need resource quota settings
reqValues := &url.Values{}
reqValues.Add("rgId", fmt.Sprintf("%d", item.ID))
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, reqValues)
if err != nil {
return "", err
}
return apiResp, nil
}
} }
return rgData, nil
return "", fmt.Errorf("Cannot find RG name %s owned by account ID %d", rgName, d.Get("account_id").(int))
} }

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -1,9 +0,0 @@
terraform {
required_providers {
decort = {
source = " digitalenergy.online/decort/decort"
version = "3.3.1"
}
}
}

@ -33,6 +33,7 @@ resource "decort_disk" "acl" {
restore = true restore = true
permanently = true permanently = true
reason = "delete" reason = "delete"
shareable = false
iotune { iotune {
read_bytes_sec = 0 read_bytes_sec = 0
read_bytes_sec_max = 0 read_bytes_sec_max = 0

@ -1,7 +0,0 @@
sonar.projectKey=terraform-provider-decort-sast
sonar.dependencyCheck.jsonReportPath=dependency-check-report.json
sonar.dependencyCheck.htmlReportPath=dependency-check-report.html
sonar.exclusions=dependency-check-report.*
sonar.language=go
Loading…
Cancel
Save