gos_tech_4.4.3
stSolo 2 years ago
parent 71ddaa3345
commit 31be0a0b54

@ -1,3 +1,36 @@
-### Bug fixes
-- error naming lb resources
+### New data sources
+- decort_disk_snapshot_list
+- decort_disk_snapshot
+- decort_disk_list_deleted
+- decort_disk_list_unattached
+- decort_disk_list_types
+- decort_disk_list_types_detailed
+
+### New resources
+- decort_disk_snapshot
+
+### New features
+- add a Dockerfile for building the tf provider into an image
+- change disk behaviour: check the disk status when updating the tf state
+- add a disk block to the kvmvm resource
+
+### New articles on wiki
+- [Building the terraform provider into an image](https://github.com/rudecs/terraform-provider-decort/wiki/04.05-Сборка-terraform-провайдера-в-образ)
+- [Bulk resource creation. Meta arguments](https://github.com/rudecs/terraform-provider-decort/wiki/05.04-Массовое-создание-ресурсов.-Мета-аргументы)
+- [Deleting resources](https://github.com/rudecs/terraform-provider-decort/wiki/05.05-Удаление-ресурсов)
+- [Managing a disk snapshot](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.19-Resource-функция-decort_disk_snapshot-управление-снимком-диска)
+- [Getting the list of disk types](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.39-Data-функция-decort_disk_list_types-получение-списка-типов-диска)
+- [Getting extended information about supported disk types](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.40-Data-функция-decort_disk_list_types_detailed-расширенное-получение-информации-о-поддерживаемых-типах-дисков)
+- [Getting information about deleted disks](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.41-Data-функция-decort_disk_list_deleted-получение-информации-об-удаленных-дисках)
+- [Getting information about unattached disks](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.42-Data-функция-decort_disk_list_unattached-получение-информации-о-неподключенных-дисках)
+- [Getting the list of disk snapshots](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.43-Data-функция-decort_disk_snapshot_list-получение-списка-снимков-состояния-диска)
+- [Getting information about a disk snapshot](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.44-Data-функция-decort_disk_snapshot-получение-информации-о-снимке-состояния)
+
+### Updated articles
+- [Managing disk resources](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.03-Resource-функция-decort_disk-управление-дисковыми-ресурсами)
+- [Managing virtual servers based on the KVM virtualization system](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.01-Resource-функция-decort_kvmvm-управление-виртуальными-машинами-на-базе-KVM)
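A quick way to see the new data sources in action (a sketch, not part of the commit: the DECORT provider is assumed to be already configured in the working directory, and disk ID 111 is a placeholder):

```sh
# Write a throwaway config exercising two of the new data sources.
# `decort_disk_list_types` takes no arguments; `decort_disk_snapshot_list`
# needs the ID of an existing disk (111 here is a placeholder).
cat > main.tf <<'EOF'
data "decort_disk_list_types" "types" {}

data "decort_disk_snapshot_list" "snaps" {
  disk_id = 111
}

output "disk_types" {
  value = data.decort_disk_list_types.types.types
}

output "disk_snapshots" {
  value = data.decort_disk_snapshot_list.snaps.items
}
EOF

terraform init && terraform plan
```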

@ -0,0 +1,10 @@
FROM docker.io/hashicorp/terraform:latest
WORKDIR /opt/decort/tf/
COPY provider.tf ./
COPY terraform-provider-decort ./terraform.d/plugins/digitalenergy.online/decort/decort/3.1.1/linux_amd64/
RUN terraform init
WORKDIR /tf
COPY entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh", "/bin/terraform"]

@ -6,12 +6,18 @@ NAME=terraform-provider-decort
 BINARY=${NAME}.exe
 WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
 MAINPATH = ./cmd/decort/
-VERSION=1.1
+VERSION=3.1.1
 #OS_ARCH=darwin_amd64
-OS_ARCH=windows_amd64
+#OS_ARCH=windows_amd64
+#OS_ARCH=linux_amd64

 default: install

+image:
+	GOOS=linux GOARCH=amd64 go build -o terraform-provider-decort ./cmd/decort/
+	docker build . -t rudecs/tf:3.1.1
+	rm terraform-provider-decort
+
 lint:
 	golangci-lint run --timeout 600s

@ -0,0 +1,4 @@
#!/bin/sh
# Copy the pre-initialized Terraform working dir (provider.tf plus the
# .terraform plugin tree created by `terraform init` during the image build)
# into the mounted working directory, dotfiles included. `tf/.` is used
# instead of `tf/* tf/.*` so the glob cannot match `.` and `..`.
cp -aL /opt/decort/tf/. ./
# Hand control to the command passed by Docker, e.g. `/bin/terraform plan`.
exec "$@"
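Together the Dockerfile, the Makefile's `image` target, and this entrypoint give a self-contained runner image. A usage sketch (the `rudecs/tf:3.1.1` tag and the `/tf` mount point come from the files above; the host-side paths are illustrative):

```sh
# Build the linux_amd64 provider binary and bake it into the image.
make image

# Run terraform against the *.tf files in the current directory: the
# entrypoint copies the pre-initialized plugin tree into /tf, then execs
# /bin/terraform with the arguments given after the image name.
docker run --rm -v "$(pwd):/tf" rudecs/tf:3.1.1 plan
```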

@ -25,4 +25,6 @@ import "time"
 var Timeout30s = time.Second * 30
 var Timeout60s = time.Second * 60
 var Timeout180s = time.Second * 180
+var Timeout300s = time.Second * 300
+var Timeout600s = time.Second * 600
 var Timeout20m = time.Minute * 20

@ -44,6 +44,12 @@ func NewDataSourcesMap() map[string]*schema.Resource {
"decort_disk": disks.DataSourceDisk(), "decort_disk": disks.DataSourceDisk(),
"decort_disk_list": disks.DataSourceDiskList(), "decort_disk_list": disks.DataSourceDiskList(),
"decort_rg_list": rg.DataSourceRgList(), "decort_rg_list": rg.DataSourceRgList(),
"decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(),
"decort_disk_list_types": disks.DataSourceDiskListTypes(),
"decort_disk_list_deleted": disks.DataSourceDiskListDeleted(),
"decort_disk_list_unattached": disks.DataSourceDiskListUnattached(),
"decort_disk_snapshot": disks.DataSourceDiskSnapshot(),
"decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(),
"decort_account_list": account.DataSourceAccountList(), "decort_account_list": account.DataSourceAccountList(),
"decort_account_computes_list": account.DataSourceAccountComputesList(), "decort_account_computes_list": account.DataSourceAccountComputesList(),
"decort_account_disks_list": account.DataSourceAccountDisksList(), "decort_account_disks_list": account.DataSourceAccountDisksList(),

@ -39,6 +39,7 @@ func NewRersourcesMap() map[string]*schema.Resource {
"decort_resgroup": rg.ResourceResgroup(), "decort_resgroup": rg.ResourceResgroup(),
"decort_kvmvm": kvmvm.ResourceCompute(), "decort_kvmvm": kvmvm.ResourceCompute(),
"decort_disk": disks.ResourceDisk(), "decort_disk": disks.ResourceDisk(),
"decort_disk_snapshot": disks.ResourceDiskSnapshot(),
"decort_vins": vins.ResourceVins(), "decort_vins": vins.ResourceVins(),
"decort_pfw": pfw.ResourcePfw(), "decort_pfw": pfw.ResourcePfw(),
"decort_k8s": k8s.ResourceK8s(), "decort_k8s": k8s.ResourceK8s(),

@ -790,11 +790,11 @@ func ResourceAccount() *schema.Resource {
 		},
 		Timeouts: &schema.ResourceTimeout{
-			Create: &constants.Timeout60s,
-			Read: &constants.Timeout30s,
-			Update: &constants.Timeout60s,
-			Delete: &constants.Timeout60s,
-			Default: &constants.Timeout60s,
+			Create: &constants.Timeout600s,
+			Read: &constants.Timeout300s,
+			Update: &constants.Timeout300s,
+			Delete: &constants.Timeout300s,
+			Default: &constants.Timeout300s,
 		},
 		Schema: resourceAccountSchemaMake(),

@ -515,11 +515,11 @@ func ResourceBasicService() *schema.Resource {
 		},
 		Timeouts: &schema.ResourceTimeout{
-			Create: &constants.Timeout60s,
-			Read: &constants.Timeout30s,
-			Update: &constants.Timeout60s,
-			Delete: &constants.Timeout60s,
-			Default: &constants.Timeout60s,
+			Create: &constants.Timeout600s,
+			Read: &constants.Timeout300s,
+			Update: &constants.Timeout300s,
+			Delete: &constants.Timeout300s,
+			Default: &constants.Timeout300s,
 		},
 		Schema: resourceBasicServiceSchemaMake(),

@ -620,11 +620,11 @@ func ResourceBasicServiceGroup() *schema.Resource {
 		},
 		Timeouts: &schema.ResourceTimeout{
-			Create: &constants.Timeout60s,
-			Read: &constants.Timeout30s,
-			Update: &constants.Timeout60s,
-			Delete: &constants.Timeout60s,
-			Default: &constants.Timeout60s,
+			Create: &constants.Timeout600s,
+			Read: &constants.Timeout300s,
+			Update: &constants.Timeout300s,
+			Delete: &constants.Timeout300s,
+			Default: &constants.Timeout300s,
 		},
 		Schema: resourceBasicServiceGroupSchemaMake(),

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
 Authors:
 Petr Krutov, <petr.krutov@digitalenergy.online>
 Stanislav Solovev, <spsolovev@digitalenergy.online>
+Kasim Baybikov, <kmbaybikov@basistech.ru>

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@ -31,11 +32,19 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 package disks

-const disksCreateAPI = "/restmachine/cloudapi/disks/create"
-const disksGetAPI = "/restmachine/cloudapi/disks/get"
-const disksListAPI = "/restmachine/cloudapi/disks/list"
-const disksResizeAPI = "/restmachine/cloudapi/disks/resize2"
-const disksRenameAPI = "/restmachine/cloudapi/disks/rename"
-const disksDeleteAPI = "/restmachine/cloudapi/disks/delete"
-const disksIOLimitAPI = "/restmachine/cloudapi/disks/limitIO"
-const disksRestoreAPI = "/restmachine/cloudapi/disks/restore"
+const (
+	disksCreateAPI           = "/restmachine/cloudapi/disks/create"
+	disksGetAPI              = "/restmachine/cloudapi/disks/get"
+	disksListAPI             = "/restmachine/cloudapi/disks/list"
+	disksResizeAPI           = "/restmachine/cloudapi/disks/resize2"
+	disksRenameAPI           = "/restmachine/cloudapi/disks/rename"
+	disksDeleteAPI           = "/restmachine/cloudapi/disks/delete"
+	disksIOLimitAPI          = "/restmachine/cloudapi/disks/limitIO"
+	disksRestoreAPI          = "/restmachine/cloudapi/disks/restore"
+	disksListTypesAPI        = "/restmachine/cloudapi/disks/listTypes"
+	disksListDeletedAPI      = "/restmachine/cloudapi/disks/listDeleted"
+	disksListUnattachedAPI   = "/restmachine/cloudapi/disks/listUnattached"
+	disksSnapshotDeleteAPI   = "/restmachine/cloudapi/disks/snapshotDelete"
+	disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
+)

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
 Authors:
 Petr Krutov, <petr.krutov@digitalenergy.online>
 Stanislav Solovev, <spsolovev@digitalenergy.online>
+Kasim Baybikov, <kmbaybikov@basistech.ru>

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@ -94,7 +95,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
 	d.Set("sep_type", disk.SepType)
 	d.Set("size_max", disk.SizeMax)
 	d.Set("size_used", disk.SizeUsed)
-	d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
+	d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
 	d.Set("status", disk.Status)
 	d.Set("tech_status", disk.TechStatus)
 	d.Set("type", disk.Type)
d.Set("type", disk.Type) d.Set("type", disk.Type)
@ -106,68 +107,83 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
 func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 	rets := map[string]*schema.Schema{
 		"disk_id": {
 			Type: schema.TypeInt,
 			Required: true,
+			Description: "The unique ID of the subscriber-owner of the disk",
 		},
 		"account_id": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "The unique ID of the subscriber-owner of the disk",
 		},
 		"account_name": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "The name of the subscriber '(account') to whom this disk belongs",
 		},
 		"acl": {
 			Type: schema.TypeString,
 			Computed: true,
 		},
 		"boot_partition": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Number of disk partitions",
 		},
 		"compute_id": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Compute ID",
 		},
 		"compute_name": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Compute name",
 		},
 		"created_time": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Created time",
 		},
 		"deleted_time": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Deleted time",
 		},
 		"desc": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Description of disk",
 		},
 		"destruction_time": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Time of final deletion",
 		},
 		"devicename": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Name of the device",
 		},
 		"disk_path": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Disk path",
 		},
 		"gid": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "ID of the grid (platform)",
 		},
 		"guid": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Disk ID on the storage side",
 		},
 		"image_id": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Image ID",
 		},
 		"images": {
 			Type: schema.TypeList,
@ -175,6 +191,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 			Elem: &schema.Schema{
 				Type: schema.TypeString,
 			},
+			Description: "IDs of images using the disk",
 		},
 		"iotune": {
 			Type: schema.TypeList,
@ -182,143 +199,177 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 			Elem: &schema.Resource{
 				Schema: map[string]*schema.Schema{
 					"read_bytes_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Number of bytes to read per second",
 					},
 					"read_bytes_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum number of bytes to read",
 					},
 					"read_iops_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Number of io read operations per second",
 					},
 					"read_iops_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum number of io read operations",
 					},
 					"size_iops_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Size of io operations",
 					},
 					"total_bytes_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Total size bytes per second",
 					},
 					"total_bytes_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum total size of bytes per second",
 					},
 					"total_iops_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Total number of io operations per second",
 					},
 					"total_iops_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum total number of io operations per second",
 					},
 					"write_bytes_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Number of bytes to write per second",
 					},
 					"write_bytes_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum number of bytes to write per second",
 					},
 					"write_iops_sec": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Number of write operations per second",
 					},
 					"write_iops_sec_max": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Maximum number of write operations per second",
 					},
 				},
 			},
 		},
 		"iqn": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Disk IQN",
 		},
 		"login": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Login to access the disk",
 		},
 		"milestones": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Milestones",
 		},
 		"disk_name": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Name of disk",
 		},
 		"order": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Disk order",
 		},
 		"params": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Disk params",
 		},
 		"parent_id": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "ID of the parent disk",
 		},
 		"passwd": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Password to access the disk",
 		},
 		"pci_slot": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "ID of the pci slot to which the disk is connected",
 		},
 		"pool": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Pool for disk location",
 		},
 		"purge_attempts": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Number of deletion attempts",
 		},
 		"purge_time": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Time of the last deletion attempt",
 		},
 		"reality_device_number": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Reality device number",
 		},
 		"reference_id": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "ID of the reference to the disk",
 		},
 		"res_id": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Resource ID",
 		},
 		"res_name": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Name of the resource",
 		},
 		"role": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Disk role",
 		},
 		"sep_id": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Storage endpoint provider ID to create disk",
 		},
 		"sep_type": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
 		},
 		"size_max": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Size in GB",
 		},
 		"size_used": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Number of used space, in GB",
 		},
 		"snapshots": {
 			Type: schema.TypeList,
@ -326,47 +377,57 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 			Elem: &schema.Resource{
 				Schema: map[string]*schema.Schema{
 					"guid": {
 						Type: schema.TypeString,
 						Computed: true,
+						Description: "ID of the snapshot",
 					},
 					"label": {
 						Type: schema.TypeString,
 						Computed: true,
+						Description: "Name of the snapshot",
 					},
 					"res_id": {
 						Type: schema.TypeString,
 						Computed: true,
+						Description: "Reference to the snapshot",
 					},
 					"snap_set_guid": {
 						Type: schema.TypeString,
 						Computed: true,
+						Description: "The set snapshot ID",
 					},
 					"snap_set_time": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "The set time of the snapshot",
 					},
 					"timestamp": {
 						Type: schema.TypeInt,
 						Computed: true,
+						Description: "Snapshot time",
 					},
 				},
 			},
 		},
 		"status": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Disk status",
 		},
 		"tech_status": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "Technical status of the disk",
 		},
 		"type": {
 			Type: schema.TypeString,
 			Computed: true,
+			Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
 		},
 		"vmid": {
 			Type: schema.TypeInt,
 			Computed: true,
+			Description: "Virtual Machine ID (Deprecated)",
 		},
 	}

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
 Authors:
 Petr Krutov, <petr.krutov@digitalenergy.online>
 Stanislav Solovev, <spsolovev@digitalenergy.online>
+Kasim Baybikov, <kmbaybikov@basistech.ru>

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@ -109,7 +110,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
 			"sep_type": disk.SepType,
 			"size_max": disk.SizeMax,
 			"size_used": disk.SizeUsed,
-			"snapshots": flattendDiskSnapshotList(disk.Snapshots),
+			"snapshots": flattenDiskSnapshotList(disk.Snapshots),
 			"status": disk.Status,
 			"tech_status": disk.TechStatus,
 			"type": disk.Type,
@ -121,7 +122,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
 }

-func flattendDiskSnapshotList(sl SnapshotList) []interface{} {
+func flattenDiskSnapshotList(sl SnapshotList) []interface{} {
 	res := make([]interface{}, 0)
 	for _, snapshot := range sl {
 		temp := map[string]interface{}{
@ -140,7 +141,7 @@ func flattendDiskSnapshotList(sl SnapshotList) []interface{} {
 }

 func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-	diskList, err := utilityDiskListCheckPresence(ctx, d, m)
+	diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListAPI)
 	if err != nil {
 		return diag.FromErr(err)
 	}
@ -180,68 +181,83 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 		Elem: &schema.Resource{
 			Schema: map[string]*schema.Schema{
 				"account_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "The unique ID of the subscriber-owner of the disk",
 				},
 				"account_name": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "The name of the subscriber '(account') to whom this disk belongs",
 				},
 				"acl": {
 					Type: schema.TypeString,
 					Computed: true,
 				},
 				"boot_partition": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Number of disk partitions",
 				},
 				"compute_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Compute ID",
 				},
 				"compute_name": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Compute name",
 				},
 				"created_time": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Created time",
 				},
 				"deleted_time": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Deleted time",
 				},
 				"desc": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Description of disk",
 				},
 				"destruction_time": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Time of final deletion",
 				},
 				"devicename": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Name of the device",
 				},
 				"disk_path": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Disk path",
 				},
 				"gid": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "ID of the grid (platform)",
 				},
 				"guid": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Disk ID on the storage side",
 				},
 				"disk_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "The unique ID of the subscriber-owner of the disk",
 				},
 				"image_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Image ID",
 				},
 				"images": {
 					Type: schema.TypeList,
@ -249,6 +265,7 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 					Elem: &schema.Schema{
 						Type: schema.TypeString,
 					},
+					Description: "IDs of images using the disk",
 				},
 				"iotune": {
 					Type: schema.TypeList,
@ -256,151 +273,187 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 					Elem: &schema.Resource{
 						Schema: map[string]*schema.Schema{
 							"read_bytes_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Number of bytes to read per second",
 							},
 							"read_bytes_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum number of bytes to read",
 							},
 							"read_iops_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Number of io read operations per second",
 							},
 							"read_iops_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum number of io read operations",
 							},
 							"size_iops_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Size of io operations",
 							},
 							"total_bytes_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Total size bytes per second",
 							},
 							"total_bytes_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum total size of bytes per second",
 							},
 							"total_iops_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Total number of io operations per second",
 							},
 							"total_iops_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum total number of io operations per second",
 							},
 							"write_bytes_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Number of bytes to write per second",
 							},
 							"write_bytes_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum number of bytes to write per second",
 							},
 							"write_iops_sec": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Number of write operations per second",
 							},
 							"write_iops_sec_max": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Maximum number of write operations per second",
 							},
 						},
 					},
 				},
 				"iqn": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Disk IQN",
 				},
 				"login": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Login to access the disk",
 				},
 				"machine_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Machine ID",
 				},
 				"machine_name": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Machine name",
 				},
 				"milestones": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Milestones",
 				},
 				"disk_name": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Name of disk",
 				},
 				"order": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Disk order",
 				},
 				"params": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Disk params",
 				},
 				"parent_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "ID of the parent disk",
 				},
 				"passwd": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Password to access the disk",
 				},
 				"pci_slot": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "ID of the pci slot to which the disk is connected",
 				},
 				"pool": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Pool for disk location",
 				},
 				"purge_attempts": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Number of deletion attempts",
 				},
 				"purge_time": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Time of the last deletion attempt",
 				},
 				"reality_device_number": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Reality device number",
 				},
 				"reference_id": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "ID of the reference to the disk",
 				},
 				"res_id": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Resource ID",
 				},
 				"res_name": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Name of the resource",
 				},
 				"role": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Disk role",
 				},
 				"sep_id": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Storage endpoint provider ID to create disk",
 				},
 				"sep_type": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
 				},
 				"size_max": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Size in GB",
 				},
 				"size_used": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Number of used space, in GB",
 				},
 				"snapshots": {
 					Type: schema.TypeList,
@ -408,47 +461,57 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 					Elem: &schema.Resource{
 						Schema: map[string]*schema.Schema{
 							"guid": {
 								Type: schema.TypeString,
 								Computed: true,
+								Description: "ID of the snapshot",
 							},
 							"label": {
 								Type: schema.TypeString,
 								Computed: true,
+								Description: "Name of the snapshot",
 							},
 							"res_id": {
 								Type: schema.TypeString,
 								Computed: true,
+								Description: "Reference to the snapshot",
 							},
 							"snap_set_guid": {
 								Type: schema.TypeString,
 								Computed: true,
+								Description: "The set snapshot ID",
 							},
 							"snap_set_time": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "The set time of the snapshot",
 							},
 							"timestamp": {
 								Type: schema.TypeInt,
 								Computed: true,
+								Description: "Snapshot time",
 							},
 						},
 					},
 				},
 				"status": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Disk status",
 				},
 				"tech_status": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "Technical status of the disk",
 				},
 				"type": {
 					Type: schema.TypeString,
 					Computed: true,
+					Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
 				},
 				"vmid": {
 					Type: schema.TypeInt,
 					Computed: true,
+					Description: "Virtual Machine ID (Deprecated)",
 				},
 			},
 		},

@ -0,0 +1,82 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/rudecs/terraform-provider-decort/internal/constants"
)

func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	id := uuid.New()
	d.SetId(id.String())
	d.Set("types", listTypes)
	return nil
}

func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"types": {
			Type: schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
		},
	}
	return res
}

func DataSourceDiskListTypes() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		ReadContext: dataSourceDiskListTypesRead,
		Timeouts: &schema.ResourceTimeout{
			Read: &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},
		Schema: dataSourceDiskListTypesSchemaMake(),
	}
}

@ -0,0 +1,133 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/rudecs/terraform-provider-decort/internal/constants"
)

func flattenDiskListTypesDetailed(tld TypesDetailedList) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, typeListDetailed := range tld {
		temp := map[string]interface{}{
			"pools": flattenListTypesDetailedPools(typeListDetailed.Pools),
			"sep_id": typeListDetailed.SepID,
		}
		res = append(res, temp)
	}
	return res
}

func flattenListTypesDetailedPools(pools PoolList) []interface{} {
	res := make([]interface{}, 0)
	for _, pool := range pools {
		temp := map[string]interface{}{
			"name": pool.Name,
			"types": pool.Types,
		}
		res = append(res, temp)
	}
	return res
}

func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenDiskListTypesDetailed(listTypesDetailed))
	return nil
}

func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"items": {
			Type: schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"pools": {
						Type: schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"name": {
									Type: schema.TypeString,
									Computed: true,
									Description: "Pool name",
								},
								"types": {
									Type: schema.TypeList,
									Computed: true,
									Elem: &schema.Schema{
										Type: schema.TypeString,
									},
									Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
								},
							},
						},
					},
					"sep_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Storage endpoint provider ID to create disk",
					},
				},
			},
		},
	}
	return res
}

func DataSourceDiskListTypesDetailed() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		ReadContext: dataSourceDiskListTypesDetailedRead,
		Timeouts: &schema.ResourceTimeout{
			Read: &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},
		Schema: dataSourceDiskListTypesDetailedSchemaMake(),
	}
}

@ -0,0 +1,485 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks

import (
	"context"
	"encoding/json"
	"net/url"
	"strconv"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/rudecs/terraform-provider-decort/internal/constants"
	"github.com/rudecs/terraform-provider-decort/internal/controller"
	"github.com/rudecs/terraform-provider-decort/internal/flattens"
	log "github.com/sirupsen/logrus"
)

func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (UnattachedList, error) {
	unattachedList := UnattachedList{}
	c := m.(*controller.ControllerCfg)
	urlValues := &url.Values{}

	// The schema key is "account_id"; the API query parameter is "accountId".
	if accountId, ok := d.GetOk("account_id"); ok {
		urlValues.Add("accountId", strconv.Itoa(accountId.(int)))
	}

	log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
	unattachedListRaw, err := c.DecortAPICall(ctx, "POST", disksListUnattachedAPI, urlValues)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal([]byte(unattachedListRaw), &unattachedList)
	if err != nil {
		return nil, err
	}

	return unattachedList, nil
}
func flattenDiskListUnattached(ul UnattachedList) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, unattachedDisk := range ul {
		unattachedDiskAcl, _ := json.Marshal(unattachedDisk.Acl)
		tmp := map[string]interface{}{
			"_ckey": unattachedDisk.Ckey,
			"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
			"account_id": unattachedDisk.AccountID,
			"account_name": unattachedDisk.AccountName,
			"acl": string(unattachedDiskAcl),
			"boot_partition": unattachedDisk.BootPartition,
			"created_time": unattachedDisk.CreatedTime,
			"deleted_time": unattachedDisk.DeletedTime,
			"desc": unattachedDisk.Desc,
			"destruction_time": unattachedDisk.DestructionTime,
			"disk_path": unattachedDisk.DiskPath,
			"gid": unattachedDisk.GridID,
			"guid": unattachedDisk.GUID,
			"disk_id": unattachedDisk.ID,
			"image_id": unattachedDisk.ImageID,
			"images": unattachedDisk.Images,
			"iotune": flattenIOTune(unattachedDisk.IOTune),
			"iqn": unattachedDisk.IQN,
			"login": unattachedDisk.Login,
			"milestones": unattachedDisk.Milestones,
			"disk_name": unattachedDisk.Name,
			"order": unattachedDisk.Order,
			"params": unattachedDisk.Params,
			"parent_id": unattachedDisk.ParentID,
			"passwd": unattachedDisk.Passwd,
			"pci_slot": unattachedDisk.PciSlot,
			"pool": unattachedDisk.Pool,
			"purge_attempts": unattachedDisk.PurgeAttempts,
			"purge_time": unattachedDisk.PurgeTime,
			"reality_device_number": unattachedDisk.RealityDeviceNumber,
			"reference_id": unattachedDisk.ReferenceID,
			"res_id": unattachedDisk.ResID,
			"res_name": unattachedDisk.ResName,
			"role": unattachedDisk.Role,
			"sep_id": unattachedDisk.SepID,
			"size_max": unattachedDisk.SizeMax,
			"size_used": unattachedDisk.SizeUsed,
			"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),
			"status": unattachedDisk.Status,
			"tech_status": unattachedDisk.TechStatus,
			"type": unattachedDisk.Type,
			"vmid": unattachedDisk.VMID,
		}
		res = append(res, tmp)
	}
	return res
}

func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	id := uuid.New()
	d.SetId(id.String())
	d.Set("items", flattenDiskListUnattached(diskListUnattached))
	return nil
}

func DataSourceDiskListUnattached() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		ReadContext: dataSourceDiskListUnattachedRead,
		Timeouts: &schema.ResourceTimeout{
			Read: &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},
		Schema: dataSourceDiskListUnattachedSchemaMake(),
	}
}

func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
	res := map[string]*schema.Schema{
		"account_id": {
			Type: schema.TypeInt,
			Optional: true,
			Description: "ID of the account the disks belong to",
		},
		"items": {
			Type: schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"_ckey": {
						Type: schema.TypeString,
						Computed: true,
						Description: "CKey",
					},
					"_meta": {
						Type: schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeString,
						},
						Description: "Meta parameters",
					},
					"account_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "ID of the account the disks belong to",
					},
					"account_name": {
						Type: schema.TypeString,
						Computed: true,
						Description: "The name of the subscriber '(account') to whom this disk belongs",
					},
					"acl": {
						Type: schema.TypeString,
						Computed: true,
					},
					"boot_partition": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Number of disk partitions",
					},
					"created_time": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Created time",
					},
					"deleted_time": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Deleted time",
					},
					"desc": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Description of disk",
					},
					"destruction_time": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Time of final deletion",
					},
					"disk_path": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Disk path",
					},
					"gid": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "ID of the grid (platform)",
					},
					"guid": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Disk ID on the storage side",
					},
					"disk_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "The unique ID of the subscriber-owner of the disk",
					},
					"image_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Image ID",
					},
					"images": {
						Type: schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeString,
						},
						Description: "IDs of images using the disk",
					},
					"iotune": {
						Type: schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"read_bytes_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Number of bytes to read per second",
								},
								"read_bytes_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum number of bytes to read",
								},
								"read_iops_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Number of io read operations per second",
								},
								"read_iops_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum number of io read operations",
								},
								"size_iops_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Size of io operations",
								},
								"total_bytes_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Total size bytes per second",
								},
								"total_bytes_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum total size of bytes per second",
								},
								"total_iops_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Total number of io operations per second",
								},
								"total_iops_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum total number of io operations per second",
								},
								"write_bytes_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Number of bytes to write per second",
								},
								"write_bytes_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum number of bytes to write per second",
								},
								"write_iops_sec": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Number of write operations per second",
								},
								"write_iops_sec_max": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Maximum number of write operations per second",
								},
							},
						},
					},
					"iqn": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Disk IQN",
					},
					"login": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Login to access the disk",
					},
					"milestones": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Milestones",
					},
					"disk_name": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Name of disk",
					},
					"order": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Disk order",
					},
					"params": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Disk params",
					},
					"parent_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "ID of the parent disk",
					},
					"passwd": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Password to access the disk",
					},
					"pci_slot": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "ID of the pci slot to which the disk is connected",
					},
					"pool": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Pool for disk location",
					},
					"purge_attempts": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Number of deletion attempts",
					},
					"purge_time": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Time of the last deletion attempt",
					},
					"reality_device_number": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Reality device number",
					},
					"reference_id": {
						Type: schema.TypeString,
						Computed: true,
						Description: "ID of the reference to the disk",
					},
					"res_id": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Resource ID",
					},
					"res_name": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Name of the resource",
					},
					"role": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Disk role",
					},
					"sep_id": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Storage endpoint provider ID to create disk",
					},
					"size_max": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Size in GB",
					},
					"size_used": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Number of used space, in GB",
					},
					"snapshots": {
						Type: schema.TypeList,
						Computed: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"guid": {
									Type: schema.TypeString,
									Computed: true,
									Description: "ID of the snapshot",
								},
								"label": {
									Type: schema.TypeString,
									Computed: true,
									Description: "Name of the snapshot",
								},
								"res_id": {
									Type: schema.TypeString,
									Computed: true,
									Description: "Reference to the snapshot",
								},
								"snap_set_guid": {
									Type: schema.TypeString,
									Computed: true,
									Description: "The set snapshot ID",
								},
								"snap_set_time": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "The set time of the snapshot",
								},
								"timestamp": {
									Type: schema.TypeInt,
									Computed: true,
									Description: "Snapshot time",
								},
							},
						},
					},
					"status": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Disk status",
					},
					"tech_status": {
						Type: schema.TypeString,
						Computed: true,
						Description: "Technical status of the disk",
					},
					"type": {
						Type: schema.TypeString,
						Computed: true,
						Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
					},
					"vmid": {
						Type: schema.TypeInt,
						Computed: true,
						Description: "Virtual Machine ID (Deprecated)",
					},
				},
			},
		},
	}
	return res
}

@ -0,0 +1,129 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/rudecs/terraform-provider-decort/internal/constants"
)

func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if disk == nil {
		if err != nil {
			return diag.FromErr(err)
		}
		return nil
	}

	snapshots := disk.Snapshots
	snapshot := Snapshot{}
	label := d.Get("label").(string)
	for _, sn := range snapshots {
		if label == sn.Label {
			snapshot = sn
			break
		}
	}
	if label != snapshot.Label {
		return diag.Errorf("Snapshot with label \"%v\" not found", label)
	}

	id := uuid.New()
	d.SetId(id.String())
	d.Set("timestamp", snapshot.TimeStamp)
	d.Set("guid", snapshot.Guid)
	d.Set("res_id", snapshot.ResId)
	d.Set("snap_set_guid", snapshot.SnapSetGuid)
	d.Set("snap_set_time", snapshot.SnapSetTime)

	return nil
}

func DataSourceDiskSnapshot() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		ReadContext: dataSourceDiskSnapshotRead,
		Timeouts: &schema.ResourceTimeout{
			Read: &constants.Timeout30s,
			Default: &constants.Timeout60s,
		},
		Schema: dataSourceDiskSnapshotSchemaMake(),
	}
}

func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
	rets := map[string]*schema.Schema{
		"disk_id": {
			Type: schema.TypeInt,
			Required: true,
			Description: "The unique ID of the subscriber-owner of the disk",
		},
		"label": {
			Type: schema.TypeString,
			Required: true,
			Description: "Name of the snapshot",
		},
		"guid": {
			Type: schema.TypeString,
			Computed: true,
			Description: "ID of the snapshot",
		},
		"timestamp": {
			Type: schema.TypeInt,
			Computed: true,
			Description: "Snapshot time",
		},
		"res_id": {
			Type: schema.TypeString,
			Computed: true,
			Description: "Reference to the snapshot",
		},
		"snap_set_guid": {
			Type: schema.TypeString,
			Computed: true,
			Description: "The set snapshot ID",
		},
		"snap_set_time": {
			Type: schema.TypeInt,
			Computed: true,
			Description: "The set time of the snapshot",
		},
	}
	return rets
}

@ -0,0 +1,121 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/constants"
)
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskSnapshotList(disk.Snapshots))
return nil
}
func DataSourceDiskSnapshotList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskSnapshotListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskSnapshotListSchemaMake(),
}
}
func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
},
},
},
}
return rets
}
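flattenDiskSnapshotList is referenced above but defined elsewhere in the package; a minimal sketch of what it presumably looks like, using only the Snapshot fields that the reads in this changeset set:

// Sketch only: field names mirror the d.Set calls in the snapshot reads.
func flattenDiskSnapshotList(snapshots []Snapshot) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(snapshots))
	for _, sn := range snapshots {
		res = append(res, map[string]interface{}{
			"label":         sn.Label,
			"guid":          sn.Guid,
			"timestamp":     sn.TimeStamp,
			"res_id":        sn.ResId,
			"snap_set_guid": sn.SnapSetGuid,
			"snap_set_time": sn.SnapSetTime,
		})
	}
	return res
}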

@ -0,0 +1,69 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListDeletedAPI)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
return nil
}
func DataSourceDiskListDeleted() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListDeletedRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
}
}
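disksListDeletedAPI and the other disks* endpoint constants used throughout this changeset are defined in the package's API constants file, which is not part of this hunk. An assumed sketch, following the /restmachine/cloudapi pattern visible in the kvmvm constants later in this diff:

// Assumed endpoint constants; the exact paths are illustrative.
const (
	disksListAPI             = "/restmachine/cloudapi/disks/list"
	disksListDeletedAPI      = "/restmachine/cloudapi/disks/listDeleted"
	disksListTypesAPI        = "/restmachine/cloudapi/disks/listTypes"
	disksRestoreAPI          = "/restmachine/cloudapi/disks/restore"
	disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
	disksSnapshotDeleteAPI   = "/restmachine/cloudapi/disks/snapshotDelete"
)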

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -109,3 +110,66 @@ type IOTune struct {
WriteIopsSec int `json:"write_iops_sec"` WriteIopsSec int `json:"write_iops_sec"`
WriteIopsSecMax int `json:"write_iops_sec_max"` WriteIopsSecMax int `json:"write_iops_sec_max"`
} }
type Pool struct {
Name string `json:"name"`
Types []string `json:"types"`
}
type PoolList []Pool
type TypeDetailed struct {
Pools []Pool `json:"pools"`
SepID int `json:"sepId"`
}
type TypesDetailedList []TypeDetailed
type TypesList []string
type Unattached struct {
Ckey string `json:"_ckey"`
Meta []interface{} `json:"_meta"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
Acl map[string]interface{} `json:"acl"`
BootPartition int `json:"bootPartition"`
CreatedTime int `json:"createdTime"`
DeletedTime int `json:"deletedTime"`
Desc string `json:"desc"`
DestructionTime int `json:"destructionTime"`
DiskPath string `json:"diskPath"`
GridID int `json:"gid"`
GUID int `json:"guid"`
ID int `json:"id"`
ImageID int `json:"imageId"`
Images []int `json:"images"`
IOTune IOTune `json:"iotune"`
IQN string `json:"iqn"`
Login string `json:"login"`
Milestones int `json:"milestones"`
Name string `json:"name"`
Order int `json:"order"`
Params string `json:"params"`
ParentID int `json:"parentId"`
Passwd string `json:"passwd"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
PurgeAttempts int `json:"purgeAttempts"`
PurgeTime int `json:"purgeTime"`
RealityDeviceNumber int `json:"realityDeviceNumber"`
ReferenceID string `json:"referenceId"`
ResID string `json:"resId"`
ResName string `json:"resName"`
Role string `json:"role"`
SepID int `json:"sepId"`
SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"`
Snapshots []Snapshot `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
Type string `json:"type"`
VMID int `json:"vmid"`
}
type UnattachedList []Unattached
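A short sketch (hypothetical helper, not in this hunk) of decoding a raw unattached-disks response into the model above, mirroring how the utility helpers in this changeset unmarshal list responses; it uses encoding/json as they do:

// Sketch only: parseUnattachedList is illustrative, not part of the diff.
func parseUnattachedList(raw string) (UnattachedList, error) {
	list := UnattachedList{}
	if err := json.Unmarshal([]byte(raw), &list); err != nil {
		return nil, err
	}
	return list, nil
}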

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -41,6 +42,7 @@ import (
"github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller" "github.com/rudecs/terraform-provider-decort/internal/controller"
"github.com/rudecs/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -119,6 +121,9 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
} }
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil { if disk == nil {
d.SetId("") d.SetId("")
@ -128,6 +133,28 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
return nil return nil
} }
if disk.Status == status.Destroyed || disk.Status == status.Purged {
d.Set("disk_id", 0)
return resourceDiskCreate(ctx, d, m)
} else if disk.Status == status.Deleted {
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))
_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
disk, err = utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
}
diskAcl, _ := json.Marshal(disk.Acl) diskAcl, _ := json.Marshal(disk.Acl)
d.Set("account_id", disk.AccountID) d.Set("account_id", disk.AccountID)
@ -169,7 +196,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("sep_type", disk.SepType) d.Set("sep_type", disk.SepType)
d.Set("size_max", disk.SizeMax) d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed) d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots)) d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status) d.Set("status", disk.Status)
d.Set("tech_status", disk.TechStatus) d.Set("tech_status", disk.TechStatus)
d.Set("type", disk.Type) d.Set("type", disk.Type)
@ -179,9 +206,27 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
} }
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
urlValues := &url.Values{} urlValues := &url.Values{}
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
}
if disk.Status == status.Destroyed || disk.Status == status.Purged {
return resourceDiskCreate(ctx, d, m)
} else if disk.Status == status.Deleted {
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))
_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
if d.HasChange("size_max") { if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max") oldSize, newSize := d.GetChange("size_max")
@ -238,26 +283,10 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{} urlValues = &url.Values{}
} }
if d.HasChange("restore") {
if d.Get("restore").(bool) {
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))
_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
}
return resourceDiskRead(ctx, d, m) return resourceDiskRead(ctx, d, m)
} }
func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil { if disk == nil {
if err != nil { if err != nil {
@ -265,7 +294,9 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
} }
return nil return nil
} }
if disk.Status == status.Destroyed || disk.Status == status.Purged {
return nil
}
params := &url.Values{} params := &url.Values{}
params.Add("diskId", d.Id()) params.Add("diskId", d.Id())
params.Add("detach", strconv.FormatBool(d.Get("detach").(bool))) params.Add("detach", strconv.FormatBool(d.Get("detach").(bool)))
@ -277,126 +308,141 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
return nil return nil
} }
func resourceDiskSchemaMake() map[string]*schema.Schema { func resourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{ rets := map[string]*schema.Schema{
"account_id": { "account_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
}, },
"disk_name": { "disk_name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
Description: "Name of disk",
}, },
"size_max": { "size_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
Description: "Size in GB",
}, },
"gid": { "gid": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true,
Description: "ID of the grid (platform)",
}, },
"pool": { "pool": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Pool for disk location",
}, },
"sep_id": { "sep_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Storage endpoint provider ID to create disk",
}, },
"desc": { "desc": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Description of disk",
}, },
"type": { "type": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true, Computed: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
}, },
"detach": { "detach": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: false, Default: false,
Description: "detach disk from machine first", Description: "Detaching the disk from compute",
}, },
"permanently": { "permanently": {
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: false, Default: false,
Description: "whether to completely delete the disk, works only with non attached disks", Description: "Whether to completely delete the disk, works only with non attached disks",
}, },
"reason": { "reason": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Default: "", Default: "",
Description: "reason for an action", Description: "Reason for deletion",
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
}, },
"disk_id": { "disk_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Disk ID. Duplicates the value of the ID parameter",
}, },
"account_name": { "account_name": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
}, },
"acl": { "acl": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"boot_partition": { "boot_partition": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Number of disk partitions",
}, },
"compute_id": { "compute_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Compute ID",
}, },
"compute_name": { "compute_name": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Compute name",
}, },
"created_time": { "created_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Created time",
}, },
"deleted_time": { "deleted_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Deleted time",
}, },
"destruction_time": { "destruction_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Time of final deletion",
}, },
"devicename": { "devicename": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Name of the device",
}, },
"disk_path": { "disk_path": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Disk path",
}, },
"guid": { "guid": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Disk ID on the storage side",
}, },
"image_id": { "image_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Image ID",
}, },
"images": { "images": {
Type: schema.TypeList, Type: schema.TypeList,
@ -404,6 +450,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Schema{ Elem: &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
}, },
Description: "IDs of images using the disk",
}, },
"iotune": { "iotune": {
Type: schema.TypeList, Type: schema.TypeList,
@ -413,143 +460,171 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"read_bytes_sec": { "read_bytes_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Number of bytes to read per second",
}, },
"read_bytes_sec_max": { "read_bytes_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum number of bytes to read",
}, },
"read_iops_sec": { "read_iops_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Number of io read operations per second",
}, },
"read_iops_sec_max": { "read_iops_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum number of io read operations",
}, },
"size_iops_sec": { "size_iops_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Size of io operations",
}, },
"total_bytes_sec": { "total_bytes_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Total size bytes per second",
}, },
"total_bytes_sec_max": { "total_bytes_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum total size of bytes per second",
}, },
"total_iops_sec": { "total_iops_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Total number of io operations per second",
}, },
"total_iops_sec_max": { "total_iops_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum total number of io operations per second",
}, },
"write_bytes_sec": { "write_bytes_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Number of bytes to write per second",
}, },
"write_bytes_sec_max": { "write_bytes_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum number of bytes to write per second",
}, },
"write_iops_sec": { "write_iops_sec": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Number of write operations per second",
}, },
"write_iops_sec_max": { "write_iops_sec_max": {
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Computed: true, Computed: true,
Description: "Maximum number of write operations per second",
}, },
}, },
}, },
}, },
"iqn": { "iqn": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Disk IQN",
}, },
"login": { "login": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Login to access the disk",
}, },
"milestones": { "milestones": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Milestones",
}, },
"order": { "order": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Disk order",
}, },
"params": { "params": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Disk params",
}, },
"parent_id": { "parent_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "ID of the parent disk",
}, },
"passwd": { "passwd": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Password to access the disk",
}, },
"pci_slot": { "pci_slot": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "ID of the pci slot to which the disk is connected",
}, },
"purge_attempts": { "purge_attempts": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Number of deletion attempts",
}, },
"purge_time": { "purge_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Time of the last deletion attempt",
}, },
"reality_device_number": { "reality_device_number": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Reality device number",
}, },
"reference_id": { "reference_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "ID of the reference to the disk",
}, },
"res_id": { "res_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Resource ID",
}, },
"res_name": { "res_name": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Name of the resource",
}, },
"role": { "role": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Disk role",
}, },
"sep_type": { "sep_type": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
}, },
"size_used": { "size_used": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Number of used space, in GB",
}, },
"snapshots": { "snapshots": {
Type: schema.TypeList, Type: schema.TypeList,
@ -557,43 +632,52 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"guid": { "guid": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "ID of the snapshot",
}, },
"label": { "label": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Name of the snapshot",
}, },
"res_id": { "res_id": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Reference to the snapshot",
}, },
"snap_set_guid": { "snap_set_guid": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "The set snapshot ID",
}, },
"snap_set_time": { "snap_set_time": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "The set time of the snapshot",
}, },
"timestamp": { "timestamp": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Snapshot time",
}, },
}, },
}, },
}, },
"status": { "status": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Disk status",
}, },
"tech_status": { "tech_status": {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
Description: "Technical status of the disk",
}, },
"vmid": { "vmid": {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
Description: "Virtual Machine ID (Deprecated)",
}, },
} }
@ -614,11 +698,11 @@ func ResourceDisk() *schema.Resource {
}, },
Timeouts: &schema.ResourceTimeout{ Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout180s, Create: &constants.Timeout600s,
Read: &constants.Timeout30s, Read: &constants.Timeout300s,
Update: &constants.Timeout180s, Update: &constants.Timeout300s,
Delete: &constants.Timeout60s, Delete: &constants.Timeout300s,
Default: &constants.Timeout60s, Default: &constants.Timeout300s,
}, },
Schema: resourceDiskSchemaMake(), Schema: resourceDiskSchemaMake(),
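The Destroyed/Deleted/Purged checks added to resourceDiskRead and resourceDiskUpdate above come from the newly imported internal/status package, which is not part of this hunk. A sketch of the constants it presumably defines (the concrete string values are assumptions):

// Assumed shape of internal/status; actual values may differ.
package status

type Status = string

const (
	Deleted   Status = "DELETED"
	Destroyed Status = "DESTROYED"
	Purged    Status = "PURGED"
)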

@ -0,0 +1,246 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"net/url"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
)
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if rollback := d.Get("rollback").(bool); rollback {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
urlValues.Add("label", label)
urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int)))
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
return resourceDiskSnapshotRead(ctx, d, m)
}
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
d.SetId(d.Get("label").(string))
d.Set("timestamp", snapshot.TimeStamp)
d.Set("guid", snapshot.Guid)
d.Set("res_id", snapshot.ResId)
d.Set("snap_set_guid", snapshot.SnapSetGuid)
d.Set("snap_set_time", snapshot.SnapSetTime)
return nil
}
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
urlValues.Add("label", label)
urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int)))
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
return resourceDiskSnapshotRead(ctx, d, m)
}
func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil { // if the disk no longer exists, there is no snapshot to delete
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
params := &url.Values{}
params.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
params.Add("label", d.Get("label").(string))
_, err = c.DecortAPICall(ctx, "POST", disksSnapshotDeleteAPI, params)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the snapshot",
},
"rollback": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Needed in order to make a snapshot rollback",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
return rets
}
func ResourceDiskSnapshot() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceDiskSnapshotCreate,
ReadContext: resourceDiskSnapshotRead,
UpdateContext: resourceDiskSnapshotUpdate,
DeleteContext: resourceDiskSnapshotDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: resourceDiskSnapshotSchemaMake(),
}
}

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -44,7 +45,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (DisksList, error) { func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, api string) (DisksList, error) {
diskList := DisksList{} diskList := DisksList{}
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
urlValues := &url.Values{} urlValues := &url.Values{}
@ -63,7 +64,7 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m
} }
log.Debugf("utilityDiskListCheckPresence: load disk list") log.Debugf("utilityDiskListCheckPresence: load disk list")
diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListAPI, urlValues) diskListRaw, err := c.DecortAPICall(ctx, "POST", api, urlValues)
if err != nil { if err != nil {
return nil, err return nil, err
} }

@ -0,0 +1,62 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
)
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesDetailedList, error) {
listTypesDetailed := TypesDetailedList{}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("detailed", "true")
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues)
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(diskListRaw), &listTypesDetailed)
if err != nil {
return nil, err
}
return listTypesDetailed, nil
}
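The matching read function for the decort_disk_list_types_detailed data source is not shown in this hunk; a sketch of how it would follow the pattern of the other reads in this changeset, assuming the same uuid/diag/schema imports as the data source files above (the flatten step is illustrative):

// Sketch only: shape follows dataSourceDiskListDeletedRead above.
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	d.SetId(uuid.New().String())
	// one item per SEP, each carrying its pools and the types they support
	items := make([]map[string]interface{}, 0, len(listTypesDetailed))
	for _, td := range listTypesDetailed {
		pools := make([]map[string]interface{}, 0, len(td.Pools))
		for _, p := range td.Pools {
			pools = append(pools, map[string]interface{}{
				"name":  p.Name,
				"types": p.Types,
			})
		}
		items = append(items, map[string]interface{}{
			"sep_id": td.SepID,
			"pools":  pools,
		})
	}
	d.Set("items", items)
	return nil
}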

@ -0,0 +1,62 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
)
func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesList, error) {
typesList := TypesList{}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("detailed", "false")
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues)
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(diskListRaw), &typesList)
if err != nil {
return nil, err
}
return typesList, nil
}

@ -248,11 +248,11 @@ func ResourceImage() *schema.Resource {
}, },
Timeouts: &schema.ResourceTimeout{ Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s, Create: &constants.Timeout600s,
Read: &constants.Timeout30s, Read: &constants.Timeout300s,
Update: &constants.Timeout60s, Update: &constants.Timeout300s,
Delete: &constants.Timeout60s, Delete: &constants.Timeout300s,
Default: &constants.Timeout60s, Default: &constants.Timeout300s,
}, },
Schema: resourceImageSchemaMake(dataSourceImageExtendSchemaMake()), Schema: resourceImageSchemaMake(dataSourceImageExtendSchemaMake()),

@ -120,11 +120,11 @@ func ResourceImageVirtual() *schema.Resource {
}, },
Timeouts: &schema.ResourceTimeout{ Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s, Create: &constants.Timeout600s,
Read: &constants.Timeout30s, Read: &constants.Timeout300s,
Update: &constants.Timeout60s, Update: &constants.Timeout300s,
Delete: &constants.Timeout60s, Delete: &constants.Timeout300s,
Default: &constants.Timeout60s, Default: &constants.Timeout300s,
}, },
Schema: resourceImageVirtualSchemaMake(dataSourceImageExtendSchemaMake()), Schema: resourceImageVirtualSchemaMake(dataSourceImageExtendSchemaMake()),

@ -380,11 +380,11 @@ func ResourceK8s() *schema.Resource {
}, },
Timeouts: &schema.ResourceTimeout{ Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout20m, Create: &constants.Timeout600s,
Read: &constants.Timeout30s, Read: &constants.Timeout300s,
Update: &constants.Timeout20m, Update: &constants.Timeout300s,
Delete: &constants.Timeout60s, Delete: &constants.Timeout300s,
Default: &constants.Timeout60s, Default: &constants.Timeout300s,
}, },
Schema: resourceK8sSchemaMake(), Schema: resourceK8sSchemaMake(),

@ -232,11 +232,11 @@ func ResourceK8sWg() *schema.Resource {
}, },
Timeouts: &schema.ResourceTimeout{ Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout20m, Create: &constants.Timeout600s,
Read: &constants.Timeout30s, Read: &constants.Timeout300s,
Update: &constants.Timeout20m, Update: &constants.Timeout300s,
Delete: &constants.Timeout60s, Delete: &constants.Timeout300s,
Default: &constants.Timeout60s, Default: &constants.Timeout300s,
}, },
Schema: resourceK8sWgSchemaMake(), Schema: resourceK8sWgSchemaMake(),

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -31,17 +32,21 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
package kvmvm package kvmvm
const KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create" const (
const KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create" KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create"
const ComputeGetAPI = "/restmachine/cloudapi/compute/get" KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create"
const RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes" ComputeGetAPI = "/restmachine/cloudapi/compute/get"
const ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach" RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes"
const ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach" ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach"
const ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach" ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach"
const ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach" ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach"
const ComputeStartAPI = "/restmachine/cloudapi/compute/start" ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach"
const ComputeStopAPI = "/restmachine/cloudapi/compute/stop" ComputeStartAPI = "/restmachine/cloudapi/compute/start"
const ComputeResizeAPI = "/restmachine/cloudapi/compute/resize" ComputeStopAPI = "/restmachine/cloudapi/compute/stop"
const DisksResizeAPI = "/restmachine/cloudapi/disks/resize2" ComputeResizeAPI = "/restmachine/cloudapi/compute/resize"
const ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete" DisksResizeAPI = "/restmachine/cloudapi/disks/resize2"
const ComputeUpdateAPI = "/restmachine/cloudapi/compute/update" ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete"
ComputeUpdateAPI = "/restmachine/cloudapi/compute/update"
ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd"
ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel"
)
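For illustration, a minimal sketch of a call through the new ComputeDiskAddAPI constant, following the DecortAPICall pattern used throughout this changeset (the function and its values are placeholders):

// Sketch only: exampleDiskAdd is illustrative, not part of the diff.
func exampleDiskAdd(ctx context.Context, c *controller.ControllerCfg, computeID string) error {
	urlValues := &url.Values{}
	urlValues.Add("computeId", computeID)
	urlValues.Add("diskName", "data-disk-1") // placeholder name
	urlValues.Add("size", "10")              // size in GB
	_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAddAPI, urlValues)
	return err
}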

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -113,6 +114,36 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
return result return result
} }
func findInExtraDisks(diskID uint, extraDisks []interface{}) bool {
for _, extraDisk := range extraDisks {
if diskID == uint(extraDisk.(int)) {
return true
}
}
return false
}
func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disksList {
if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks
continue
}
temp := map[string]interface{}{
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"pool": disk.Pool,
"desc": disk.Desc,
"image_id": disk.ImageID,
"size": disk.SizeMax,
}
res = append(res, temp)
}
return res
}
func flattenCompute(d *schema.ResourceData, compFacts string) error { func flattenCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that compFacts string contains response from API compute/get, // This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance. // i.e. detailed information about compute instance.
@ -162,12 +193,12 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
d.Set("sep_id", bootDisk.SepID) d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool) d.Set("pool", bootDisk.Pool)
if len(model.Disks) > 0 { //if len(model.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks)) //log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil { //if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
return err //return err
} //}
} //}
if len(model.Interfaces) > 0 { if len(model.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces)) log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
@ -183,6 +214,11 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
} }
} }
err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
if err != nil {
return err
}
return nil return nil
} }

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -131,6 +132,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
urlValues = &url.Values{}
// Compute create API returns ID of the new Compute instance on success // Compute create API returns ID of the new Compute instance on success
d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially
@ -148,6 +150,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err) log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err)
} }
d.SetId("") d.SetId("")
urlValues = &url.Values{}
} }
}() }()
@ -189,13 +192,50 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
if !cleanup {
if disks, ok := d.GetOk("disks"); ok {
log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", compId)
addedDisks := disks.([]interface{})
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
urlValues.Add("computeId", d.Id())
urlValues.Add("diskName", diskConv["disk_name"].(string))
urlValues.Add("size", strconv.Itoa(diskConv["size"].(int)))
if diskConv["disk_type"].(string) != "" {
urlValues.Add("diskType", diskConv["disk_type"].(string))
}
if diskConv["sep_id"].(int) != 0 {
urlValues.Add("sepId", strconv.Itoa(diskConv["sep_id"].(int)))
}
if diskConv["pool"].(string) != "" {
urlValues.Add("pool", diskConv["pool"].(string))
}
if diskConv["desc"].(string) != "" {
urlValues.Add("desc", diskConv["desc"].(string))
}
if diskConv["image_id"].(int) != 0 {
urlValues.Add("imageId", strconv.Itoa(diskConv["image_id"].(int)))
}
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAddAPI, urlValues)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string)) log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
// We may reuse dataSourceComputeRead here as we maintain similarity // We may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas // between Compute resource and Compute data source schemas
// Compute read function will also update resource ID on success, so that Terraform // Compute read function will also update resource ID on success, so that Terraform
// will know the resource exists // will know the resource exists
return dataSourceComputeRead(ctx, d, m) return resourceComputeRead(ctx, d, m)
} }
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -236,32 +276,32 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
*/ */
// 1. Resize CPU/RAM // 1. Resize CPU/RAM
params := &url.Values{} urlValues := &url.Values{}
doUpdate := false doUpdate := false
params.Add("computeId", d.Id()) urlValues.Add("computeId", d.Id())
oldCpu, newCpu := d.GetChange("cpu") oldCpu, newCpu := d.GetChange("cpu")
if oldCpu.(int) != newCpu.(int) { if oldCpu.(int) != newCpu.(int) {
params.Add("cpu", fmt.Sprintf("%d", newCpu.(int))) urlValues.Add("cpu", fmt.Sprintf("%d", newCpu.(int)))
doUpdate = true doUpdate = true
} else { } else {
params.Add("cpu", "0") // no change to CPU allocation urlValues.Add("cpu", "0") // no change to CPU allocation
} }
oldRam, newRam := d.GetChange("ram") oldRam, newRam := d.GetChange("ram")
if oldRam.(int) != newRam.(int) { if oldRam.(int) != newRam.(int) {
params.Add("ram", fmt.Sprintf("%d", newRam.(int))) urlValues.Add("ram", fmt.Sprintf("%d", newRam.(int)))
doUpdate = true doUpdate = true
} else { } else {
params.Add("ram", "0") urlValues.Add("ram", "0")
} }
if doUpdate { if doUpdate {
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d", log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
oldCpu.(int), newCpu.(int), oldCpu.(int), newCpu.(int),
oldRam.(int), newRam.(int)) oldRam.(int), newRam.(int))
params.Add("force", "true") urlValues.Add("force", "true")
_, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, params) _, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, urlValues)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -284,13 +324,15 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
} }
// 3. Calculate and apply changes to data disks // 3. Calculate and apply changes to data disks
err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any if d.HasChange("extra_disks") {
if err != nil { err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
return diag.FromErr(err) if err != nil {
return diag.FromErr(err)
}
} }
// 4. Calculate and apply changes to network connections // 4. Calculate and apply changes to network connections
err = utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any err := utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -319,9 +361,108 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
urlValues = &url.Values{}
if d.HasChange("disks") {
deletedDisks := make([]interface{}, 0)
addedDisks := make([]interface{}, 0)
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
newConv := newDisks.([]interface{})
for _, el := range oldConv {
if !isContainsDisk(newConv, el) {
deletedDisks = append(deletedDisks, el)
}
}
for _, el := range newConv {
if !isContainsDisk(oldConv, el) {
addedDisks = append(addedDisks, el)
}
}
if len(deletedDisks) > 0 {
urlValues.Add("computeId", d.Id())
urlValues.Add("force", "false")
_, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
for _, disk := range deletedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_name"].(string) == "bootdisk" {
continue
}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskId", strconv.Itoa(diskConv["disk_id"].(int)))
urlValues.Add("permanently", strconv.FormatBool(diskConv["permanently"].(bool)))
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskDeleteAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
urlValues.Add("computeId", d.Id())
urlValues.Add("altBootId", "0")
_, err = c.DecortAPICall(ctx, "POST", ComputeStartAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_name"].(string) == "bootdisk" {
continue
}
urlValues.Add("computeId", d.Id())
urlValues.Add("diskName", diskConv["disk_name"].(string))
urlValues.Add("size", strconv.Itoa(diskConv["size"].(int)))
if diskConv["disk_type"].(string) != "" {
urlValues.Add("diskType", diskConv["disk_type"].(string))
}
if diskConv["sep_id"].(int) != 0 {
urlValues.Add("sepId", strconv.Itoa(diskConv["sep_id"].(int)))
}
if diskConv["pool"].(string) != "" {
urlValues.Add("pool", diskConv["pool"].(string))
}
if diskConv["desc"].(string) != "" {
urlValues.Add("desc", diskConv["desc"].(string))
}
if diskConv["image_id"].(int) != 0 {
urlValues.Add("imageId", strconv.Itoa(diskConv["image_id"].(int)))
}
_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAddAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
}
}
// we may reuse dataSourceComputeRead here as we maintain similarity // we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas // between Compute resource and Compute data source schemas
return dataSourceComputeRead(ctx, d, m) return resourceComputeRead(ctx, d, m)
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) {
return true
}
}
return false
} }
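Note that isContainsDisk matches entries by disk_name only, so a size-only change to an existing disk is not treated as a delete-and-add. A quick illustration (hypothetical values):

// Sketch only: demonstrates the name-based matching above.
func exampleIsContainsDisk() bool {
	oldDisks := []interface{}{
		map[string]interface{}{"disk_name": "data1", "size": 10},
	}
	resized := map[string]interface{}{"disk_name": "data1", "size": 20}
	return isContainsDisk(oldDisks, resized) // true: the name still matches
}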
func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -346,199 +487,265 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf
return nil return nil
} }
func ResourceComputeSchemaMake() map[string]*schema.Schema {
	rets := map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
		},
		"rg_id": {
			Type:         schema.TypeInt,
			Required:     true,
			ValidateFunc: validation.IntAtLeast(1),
			Description:  "ID of the resource group where this compute should be deployed.",
		},
		"driver": {
			Type:         schema.TypeString,
			Required:     true,
			ForceNew:     true,
			StateFunc:    statefuncs.StateFuncToUpper,
			ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating
			Description:  "Hardware architecture of this compute instance.",
		},
		"cpu": {
			Type:         schema.TypeInt,
			Required:     true,
			ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute),
			Description:  "Number of CPUs to allocate to this compute instance.",
		},
		"ram": {
			Type:         schema.TypeInt,
			Required:     true,
			ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
			Description:  "Amount of RAM in MB to allocate to this compute instance.",
		},
		"image_id": {
			Type:        schema.TypeInt,
			Required:    true,
			ForceNew:    true,
			Description: "ID of the OS image to base this compute instance on.",
		},
		"boot_disk_size": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "This compute instance boot disk size in GB. Make sure it is large enough to accommodate selected OS image.",
		},
		"disks": {
			Type:     schema.TypeList,
			Computed: true,
			Optional: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"disk_name": {
						Type:        schema.TypeString,
						Required:    true,
						Description: "Name for disk",
					},
					"size": {
						Type:        schema.TypeInt,
						Required:    true,
						Description: "Disk size in GiB",
					},
					"disk_type": {
						Type:         schema.TypeString,
						Computed:     true,
						Optional:     true,
						ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
						Description:  "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
					},
					"sep_id": {
						Type:        schema.TypeInt,
						Computed:    true,
						Optional:    true,
						Description: "Storage endpoint provider ID; by default the same with boot disk",
					},
					"pool": {
						Type:        schema.TypeString,
						Computed:    true,
						Optional:    true,
						Description: "Pool name; by default will be chosen automatically",
					},
					"desc": {
						Type:        schema.TypeString,
						Computed:    true,
						Optional:    true,
						Description: "Optional description",
					},
					"image_id": {
						Type:        schema.TypeInt,
						Computed:    true,
						Optional:    true,
						Description: "Specify image id to create disk from template",
					},
					"disk_id": {
						Type:        schema.TypeInt,
						Computed:    true,
						Description: "Disk ID",
					},
					"permanently": {
						Type:        schema.TypeBool,
						Optional:    true,
						Default:     false,
						Description: "Disk deletion status",
					},
				},
			},
		},
		"sep_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			ForceNew:    true,
			Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.",
		},
		"pool": {
			Type:        schema.TypeString,
			Optional:    true,
			Computed:    true,
			ForceNew:    true,
			Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.",
		},
		"extra_disks": {
			Type:     schema.TypeSet,
			Optional: true,
			MaxItems: constants.MaxExtraDisksPerCompute,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
			Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.",
		},
		"network": {
			Type:     schema.TypeSet,
			Optional: true,
			MaxItems: constants.MaxNetworksPerCompute,
			Elem: &schema.Resource{
				Schema: networkSubresourceSchemaMake(),
			},
			Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
		},
		/*
			"ssh_keys": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: MaxSshKeysPerCompute,
				Elem: &schema.Resource{
					Schema: sshSubresourceSchemaMake(),
				},
				Description: "SSH keys to authorize on this compute instance.",
			},
		*/
		"description": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Optional text description of this compute instance.",
		},
		"cloud_init": {
			Type:             schema.TypeString,
			Optional:         true,
			Default:          "applied",
			DiffSuppressFunc: cloudInitDiffSupperss,
			Description:      "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.",
		},

		// The rest are Compute properties, which are "computed" once it is created
		"rg_name": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Name of the resource group where this compute instance is located.",
		},
		"account_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "ID of the account this compute instance belongs to.",
		},
		"account_name": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Name of the account this compute instance belongs to.",
		},
		"boot_disk_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "This compute instance boot disk ID.",
		},
		"os_users": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: osUsersSubresourceSchemaMake(),
			},
			Description: "Guest OS users provisioned on this compute instance.",
		},
		"started": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     true,
			Description: "Is compute started.",
		},
		"detach_disks": {
			Type:     schema.TypeBool,
			Optional: true,
			Default:  true,
		},
		"permanently": {
			Type:     schema.TypeBool,
			Optional: true,
			Default:  true,
		},
		"is": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "system name",
		},
		"ipa_type": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "compute purpose",
		},
	}
	return rets
}

func ResourceCompute() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		CreateContext: resourceComputeCreate,
		ReadContext:   resourceComputeRead,
		UpdateContext: resourceComputeUpdate,
		DeleteContext: resourceComputeDelete,

		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Timeouts: &schema.ResourceTimeout{
			Create:  &constants.Timeout600s,
			Read:    &constants.Timeout300s,
			Update:  &constants.Timeout300s,
			Delete:  &constants.Timeout300s,
			Default: &constants.Timeout300s,
		},

		Schema: ResourceComputeSchemaMake(),
	}
}
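Splitting the schema out into ResourceComputeSchemaMake keeps ResourceCompute a thin constructor and lets the same schema definition be reused elsewhere. As a hedged illustration only, this is the standard terraform-plugin-sdk v2 shape for registering such a constructor; the Provider function below is assumed for the sketch, not taken from this commit (only the "decort_kvmvm" resource name appears in this diff):

	// Hypothetical registration sketch (standard SDK v2 pattern).
	func Provider() *schema.Provider {
		return &schema.Provider{
			ResourcesMap: map[string]*schema.Resource{
				"decort_kvmvm": ResourceCompute(), // schema comes from ResourceComputeSchemaMake()
			},
		}
	}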

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -91,16 +92,33 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
	detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
	log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())

	if detach_set.Len() > 0 {
		urlValues := &url.Values{}
		urlValues.Add("computeId", d.Id())
		urlValues.Add("force", "false")
		_, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues)
		if err != nil {
			return err
		}
		for _, diskId := range detach_set.List() {
			urlValues := &url.Values{}
			urlValues.Add("computeId", d.Id())
			urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
			_, err := c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, urlValues)
			if err != nil {
				// failed to detach disk - there will be partial resource update
				log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
				apiErrCount++
				lastSavedError = err
			}
		}
		urlValues = &url.Values{}
		urlValues.Add("computeId", d.Id())
		urlValues.Add("altBootId", "0")
		_, err = c.DecortAPICall(ctx, "POST", ComputeStartAPI, urlValues)
		if err != nil {
			return err
		}
	}

@ -269,11 +269,11 @@ func ResourceLB() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: lbResourceSchemaMake(),

@ -214,11 +214,11 @@ func ResourceLBBackend() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: map[string]*schema.Schema{

@ -225,11 +225,11 @@ func ResourceLBBackendServer() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: map[string]*schema.Schema{

@ -138,11 +138,11 @@ func ResourceLBFrontend() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: map[string]*schema.Schema{

@ -162,11 +162,11 @@ func ResourceLBFrontendBind() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: map[string]*schema.Schema{

@ -185,11 +185,11 @@ func ResourcePfw() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: resourcePfwSchemaMake(),

@ -311,11 +311,11 @@ func ResourceResgroup() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: map[string]*schema.Schema{

@ -182,11 +182,11 @@ func ResourceSnapshot() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: resourceSnapshotSchemaMake(),

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -296,11 +296,11 @@ func ResourceVins() *schema.Resource {
	},
	Timeouts: &schema.ResourceTimeout{
		Create:  &constants.Timeout600s,
		Read:    &constants.Timeout300s,
		Update:  &constants.Timeout300s,
		Delete:  &constants.Timeout300s,
		Default: &constants.Timeout300s,
	},
	Schema: resourceVinsSchemaMake(),

@ -0,0 +1,32 @@
package status
type Status = string
var (
	//The disk is linked to a Compute
Assigned Status = "ASSIGNED"
//An object model has been created in the database
Modeled Status = "MODELED"
//In the process of creation
Creating Status = "CREATING"
	//The object has been created
Created Status = "CREATED"
//Physical resources are allocated for the object
Allocated Status = "ALLOCATED"
//The object has released (returned to the platform) the physical resources that it occupied
Unallocated Status = "UNALLOCATED"
//Permanently deleted
Destroyed Status = "DESTROYED"
//Deleted to Trash
Deleted Status = "DELETED"
//Deleted from storage
Purged Status = "PURGED"
)
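A hedged sketch of how these status constants can be consumed when deciding whether a disk should be dropped from Terraform state; the DiskRecord type and the import path are assumptions for illustration, not taken from this commit:

	// Hypothetical consumer of the status package (type and import path assumed).
	import "github.com/rudecs/terraform-provider-decort/internal/status"

	type DiskRecord struct {
		ID     int
		Status status.Status
	}

	// isGone reports whether the disk has left the platform for good
	// and should be removed from Terraform state.
	func isGone(d DiskRecord) bool {
		switch d.Status {
		case status.Destroyed, status.Deleted, status.Purged:
			return true
		default:
			return false
		}
	}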

@ -0,0 +1,9 @@
terraform {
required_providers {
decort = {
source = "digitalenergy.online/decort/decort"
version = "3.1.1"
}
}
}

@ -43,6 +43,12 @@
- lb
- lb_list
- lb_list_deleted
- disk_list_deleted
- disk_list_unattached
- disk_list_types
- disk_list_types_detailed
- disk_snapshot_list
- disk_snapshot
- resources:
- image
- virtual_image
@ -60,6 +66,7 @@
- lb_backend
- lb_frontend_bind
- lb_backend_server
- disk_snapshot
- cloudbroker:
- data:
- grid

@ -0,0 +1,54 @@
/*
Usage example
Get the list of disks with the DELETED status
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_list_deleted" "dld" {
#id аккаунта для получения списка дисков
#опциональный параметр
#тип - число
#account_id = 11111
#тип диска
#опциональный параметр
#тип - строка
#возможные типы: "b" - boot_disk, "d" - data_disk
#type = "d"
#кол-во страниц для вывода
#опицональный параметр
#тип - число
#page = 1
#размер страницы
#опицональный параметр
#тип - число
#size = 1
}
output "test" {
value = data.decort_disk_list_deleted.dld
}

@ -0,0 +1,39 @@
/*
Usage example
Get the list of disk types
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_list_types" "dlt" {
#Нет входных параметров
#Выходной параметр
#тип - лист строк
#types {}
}
output "test" {
value = data.decort_disk_list_types.dlt
}

@ -0,0 +1,52 @@
/*
Usage example
Get the list of disk types, with details
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_list_types_detailed" "dltd" {
#Нет входных параметров
#Выходной параметр
#тип - лист типов
# items {}
#Выходной параметр
#Список пулов
# pools
#Выходной параметр
#Имя
# name
#Выходной параметр
#Список типов
#types
}
output "test" {
value = data.decort_disk_list_types_detailed.dltd
}

@ -0,0 +1,39 @@
/*
Usage example
Get the list of available unattached disks
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_list_unattached" "dlu" {
#Номер аккаунта
#опциональный параметр
#тип - число
account_id = 100
}
output "test" {
value = data.decort_disk_list_unattached.dlu
}

@ -0,0 +1,44 @@
/*
Usage example
Get a specific snapshot
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_snapshot" "ds" {
#Номер диска
#обязательный параметр
#тип - число
disk_id = 20100
#Ярлык диска
#обязательный параметр
#тип - строка
label = "label"
}
output "test" {
value = data.decort_disk_snapshot.ds
}

@ -0,0 +1,39 @@
/*
Usage example
Get the list of disk snapshots
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
data "decort_disk_snapshot_list" "ds" {
#Номер диска
#обязательный параметр
#тип - число
disk_id = 20100
}
output "test" {
value = data.decort_disk_snapshot_list.ds
}

@ -0,0 +1,47 @@
/*
Usage example
Disk snapshot resource
*/
#Uncomment this block and adjust the version and path
#to work with a provider installed manually
#(not from the hashicorp provider registry)
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
}
}
}
provider "decort" {
authenticator = "oauth2"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>
oauth2_url = "https://sso.digitalenergy.online"
allow_unverified_ssl = true
}
resource "decort_disk_snapshot" "ds" {
#Номер диска
#обязательный параметр
#тип - число
disk_id = 20100
#Ярлык диска
#обязательный параметр
#тип - строка
label = "label"
#флаг rollback
#опциональный параметр
#тип - bool
rollback = false
}
output "test" {
value = decort_disk_snapshot.ds
}

@ -72,6 +72,50 @@ resource "decort_kvmvm" "comp" {
  #type - string
  description = "test update description in tf words update"
  #Create and attach a disk for the compute
  #optional parameter
  #type - list of disks
  disks {
    #disk name
    #required parameter for a disk
    #type - string
    disk_name = "disk_name"

    #disk size
    #required parameter for a disk
    #type - number
    size = 5

    #disk type
    #optional parameter
    #type - string
    disk_type = "D"

    #storage endpoint provider ID
    #optional parameter
    #type - number
    sep_id = 1

    #pool name
    #optional parameter
    #type - string
    pool = "data01"

    #disk description
    #optional parameter
    #type - string
    desc = ""

    #image ID
    #optional parameter
    #type - number
    image_id = 378

    #disk deletion flag
    #optional parameter
    #type - bool
    permanently = false
  }
}
