gos_tech_4.4.3 4.4.4
nlloskutova 11 months ago
parent ce4b847596
commit 0faa6cbbd1

@ -1,6 +1,20 @@
## Version 4.4.3
## Version 4.4.4
### Bugfix
- Fixed panic
- Fixed field description in cloudapi/kvmvm/resource_compute
- Fixed the owner field in the LB create request in cloudapi/cloudbroker
- Changed the value field in the kvmvm resource schema (affinity_rules and anti_affinity_rules blocks) from required to optional in cloudapi
- Changed the ID format of the k8s_wg resource from wg_id to k8s_id#wg_id (see the sketch after this changelog)
- Fixed the install.bat and install.sh scripts
- Changed the update logic from disk delete and re-add to disk rename when the disk_name field changes in the cloudapi/kvmvm resource
- Fixed the allowed network plugin value from "weawenet" to "weavenet" for the k8ci and k8s resources in cloudbroker and the k8s resource in cloudapi
- Fixed a bug with deleting the decort_bservice resource when enable = false is set in cloudapi/bservice
- Fixed a panic in the decort_bservice_snapshot_list data source in cloudapi/bservice
- Fixed a panic in the decort_rg_affinity_groups_list data source in cloudapi/rg
- Fixed duplicate enabling of a bservice after creation in cloudapi/bservice
- Fixed starting a bservice after creation in cloudapi/bservice
- Fixed the permanently field for disk deletion: changed the default value from true to false in decort_kvmvm in cloudapi/kvmvm
### Feature
- Added the "permanently" flag to k8s and k8s_cp in cloudapi
- Added RAM validation in cloudapi/[k8s, k8s_cp, k8s_wg, kvmvm, bservice]
- Added RAM validation in cloudbroker/[k8s, k8s_wg, kvmvm]
- Added the restore field to the cloudapi/disk resource
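
For reference, a minimal sketch of the new composite worker-group ID mentioned above. It mirrors the `fmt.Sprintf("%d#%d", ...)` and `strings.Split(d.Id(), "#")` calls in the k8s_wg diff below; the helper names are illustrative and not part of the provider.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// composeWgID builds the 4.4.4 k8s_wg resource ID in the "k8s_id#wg_id" format.
func composeWgID(k8sID, wgID uint64) string {
	return fmt.Sprintf("%d#%d", k8sID, wgID)
}

// parseWgID splits a "k8s_id#wg_id" ID back into its parts.
// A bare "wg_id" (pre-4.4.4 state) is returned with k8sID == 0.
func parseWgID(id string) (k8sID, wgID uint64, err error) {
	if !strings.Contains(id, "#") {
		wgID, err = strconv.ParseUint(id, 10, 64)
		return 0, wgID, err
	}
	parts := strings.SplitN(id, "#", 2)
	if k8sID, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	wgID, err = strconv.ParseUint(parts[1], 10, 64)
	return k8sID, wgID, err
}

func main() {
	id := composeWgID(1234, 5678)
	fmt.Println(id) // 1234#5678
	k8sID, wgID, _ := parseWgID(id)
	fmt.Println(k8sID, wgID) // 1234 5678
}
```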

@ -7,7 +7,7 @@ ZIPDIR = ./zip
BINARY=${NAME}
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.4.3
VERSION=4.4.4
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
FILES = ${BINARY}_${VERSION}_darwin_amd64\

@ -8,7 +8,7 @@ require (
github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0
github.com/sirupsen/logrus v1.9.0
golang.org/x/net v0.17.0
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.10
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.14
)
require (

@ -285,5 +285,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.10 h1:3DSdLI1exX+QFO9FKmpw5r7duOKI1rITqSBsTYFdSyw=
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.10/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.14 h1:FSu4kPKHsrmZpYIUKlX0s4v53CGmmT2Ne2uWuiRh6pM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.6.14/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=

@ -37,3 +37,6 @@ const MaxCpusPerCompute = 128
// MinRamPerCompute sets minimum amount of RAM per compute in MB
const MinRamPerCompute = 128
// RAMDivisibility sets divisibility of RAM value
const RAMDivisibility = 128

@ -49,7 +49,7 @@ func dataSourceBasicServiceSnapshotListRead(ctx context.Context, d *schema.Resou
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenBasicServiceSnapshots(basicServiceSnapshotList))
d.Set("items", flattenBasicServiceSnapshots(basicServiceSnapshotList.Data))
return nil
}

@ -34,14 +34,17 @@ package bservice
import (
"context"
"errors"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
@ -83,6 +86,8 @@ func resourceBasicServiceCreate(ctx context.Context, d *schema.ResourceData, m i
return diag.FromErr(err)
}
warnings := dc.Warnings{}
if d.Get("enable").(bool) && (service.Status == status.Disabled || service.Status == status.Created) {
log.Debugf("trying to enable bservice %v", serviceId)
_, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{
@ -90,21 +95,29 @@ func resourceBasicServiceCreate(ctx context.Context, d *schema.ResourceData, m i
})
if err != nil {
return diag.FromErr(err)
warnings.Add(err)
}
}
if d.Get("start").(bool) && d.Get("enable").(bool) {
if d.Get("start").(bool) {
log.Debugf("trying to start bservice %v", serviceId)
_, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{
ServiceID: serviceId,
})
if err != nil {
return diag.FromErr(err)
if !d.Get("enable").(bool) {
warnings.Add(errors.New("can not start bservice that is not enabled. Set enable = true and start = true to enable and start bservice"))
}
if d.Get("enable").(bool) {
_, err := c.CloudAPI().BService().Start(ctx, bservice.StartRequest{
ServiceID: serviceId,
})
if err != nil {
warnings.Add(err)
}
}
}
return resourceBasicServiceRead(ctx, d, m)
return append(warnings.Get(), resourceBasicServiceRead(ctx, d, m)...)
}
func resourceBasicServiceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -230,25 +243,39 @@ func resourceBasicServiceUpdate(ctx context.Context, d *schema.ResourceData, m i
case status.Disabling:
log.Debugf("The basic service is in status: %s, troubles can occur with the update.", bs.Status)
case status.Deleted:
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := bservice.RestoreRequest{
ServiceID: id,
}
enableReq := bservice.EnableRequest{
ServiceID: id,
}
if d.Get("restore").(bool) {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := bservice.RestoreRequest{
ServiceID: id,
}
_, err := c.CloudAPI().BService().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
_, err := c.CloudAPI().BService().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
_, err = c.CloudAPI().BService().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
if d.Get("enable").(bool) {
_, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{
ServiceID: id,
})
if err != nil {
return diag.FromErr(err)
}
}
hasChanged = true
if d.Get("start").(bool) {
_, err := c.CloudAPI().BService().Start(ctx, bservice.StartRequest{
ServiceID: id,
})
if err != nil {
return diag.FromErr(err)
}
}
}
case status.Deleting:
case status.Destroyed:
d.SetId("")
@ -291,19 +318,6 @@ func resourceBasicServiceUpdate(ctx context.Context, d *schema.ResourceData, m i
}
}
if d.HasChange("restore") {
restore := d.Get("restore").(bool)
if restore {
req := bservice.RestoreRequest{
ServiceID: uint64(d.Get("service_id").(int)),
}
_, err := c.CloudAPI().BService().Restore(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
if d.HasChange("start") {
start := d.Get("start").(bool)
if start {
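
For context on the bservice create fix above: non-fatal Enable/Start errors are now collected and returned as warnings together with the Read diagnostics instead of aborting the create. Below is a minimal, self-contained sketch of that pattern, using a simplified stand-in for the provider's internal dc.Warnings helper (only Add and Get are visible in the diff).

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

// warnings is a simplified stand-in for the provider's internal dc.Warnings:
// it collects non-fatal errors and exposes them as warning-level diagnostics.
type warnings struct {
	diags diag.Diagnostics
}

func (w *warnings) Add(err error) {
	w.diags = append(w.diags, diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  err.Error(),
	})
}

func (w *warnings) Get() diag.Diagnostics { return w.diags }

func main() {
	w := warnings{}
	// As in resourceBasicServiceCreate: start = true with enable = false yields a
	// warning, not a hard error, so creation still completes.
	w.Add(errors.New("can not start bservice that is not enabled"))

	readDiags := diag.Diagnostics{} // stands in for resourceBasicServiceRead(ctx, d, m)
	all := append(w.Get(), readDiags...)
	fmt.Println(len(all)) // 1
}
```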

@ -43,6 +43,7 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func resourceBasicServiceGroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -107,12 +108,12 @@ func resourceBasicServiceGroupCreate(ctx context.Context, d *schema.ResourceData
d.SetId(strconv.FormatUint(compgroupId, 10))
d.Set("compgroup_id", compgroupId)
serviceId:= uint64(d.Get("service_id").(int))
serviceId := uint64(d.Get("service_id").(int))
if d.Get("start").(bool) {
log.Debugf("trying to start bservice group %v", compgroupId)
_, err := c.CloudAPI().BService().GroupStart(ctx, bservice.GroupStartRequest{
ServiceID: serviceId,
ServiceID: serviceId,
CompGroupID: compgroupId,
})
@ -371,8 +372,12 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
Description: "compute CPU number. All computes in the group have the same CPU count",
},
"ram": {
Type: schema.TypeInt,
Required: true,
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "compute RAM volume in MB. All computes in the group have the same RAM volume",
},
"disk": {

@ -42,7 +42,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityBasicServiceSnapshotListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (bservice.ListSnapshots, error) {
func utilityBasicServiceSnapshotListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*bservice.ListInfoSnapshots, error) {
c := m.(*controller.ControllerCfg)
var id uint64

@ -231,20 +231,23 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if restore, ok:= d.GetOk("restore"); ok && restore.(bool) {
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)
}
_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)
}
case status.Assigned:
case status.Modeled:
@ -357,10 +360,18 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
}
req := disks.DeleteRequest{
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
DiskID: disk.ID,
}
if detach, ok := d.GetOk("detach"); ok {
req.Detach = detach.(bool)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
c := m.(*controller.ControllerCfg)
@ -446,6 +457,11 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Optional: true,
Computed: true,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"iotune": {
Type: schema.TypeList,
Optional: true,

@ -35,6 +35,8 @@ package k8s
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func nodeMasterDefault() K8sNodeRecord {
@ -85,21 +87,25 @@ func mastersSchemaMake() map[string]*schema.Schema {
Optional: true,
}
masters["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Type: schema.TypeInt,
Required: true,
//ForceNew: true,
Description: "Node CPU count.",
}
masters["ram"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Type: schema.TypeInt,
Required: true,
//ForceNew: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Node RAM in MB.",
}
masters["disk"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Type: schema.TypeInt,
Required: true,
//ForceNew: true,
Description: "Node boot disk size in GB.",
}
@ -117,7 +123,11 @@ func workersSchemaMake() map[string]*schema.Schema {
Required: true,
},
"ram": {
Type: schema.TypeInt,
Type: schema.TypeInt,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Required: true,
},
"cpu": {

@ -513,6 +513,7 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
func resourceK8sDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
cluster, err := utilityK8sCheckPresence(ctx, d, m)
@ -520,12 +521,14 @@ func resourceK8sDelete(ctx context.Context, d *schema.ResourceData, m interface{
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := k8s.DeleteRequest{
K8SID: cluster.ID,
Permanently: true,
req := k8s.DeleteRequest{K8SID: cluster.ID}
if val, ok := d.GetOk("permanently"); ok {
req.Permanently = val.(bool)
}
c := m.(*controller.ControllerCfg)
_, err = c.CloudAPI().K8S().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
@ -679,8 +682,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
////
"desc": {
Type: schema.TypeString,
Optional: true,
@ -741,6 +742,13 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "IP address of default load balancer.",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Determines if cluster should be destroyed",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,

@ -52,6 +52,7 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -530,25 +531,29 @@ func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interfac
}
func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
log.Debugf("resourceK8sDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := k8s.DeleteRequest{
K8SID: cluster.ID,
Permanently: true,
req := k8s.DeleteRequest{K8SID: cluster.ID}
if val, ok := d.GetOk("permanently"); ok {
req.Permanently = val.(bool)
}
c := m.(*controller.ControllerCfg)
_, err = c.CloudAPI().K8S().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceK8sCPSchemaMake() map[string]*schema.Schema {
@ -590,9 +595,13 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Node RAM in MB.",
},
"disk": {
@ -679,6 +688,13 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Determines if cluster should be destroyed",
},
////
"extnet_id": {
Type: schema.TypeInt,

@ -34,16 +34,19 @@ package k8s
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -101,7 +104,7 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(resp, 10))
d.SetId(fmt.Sprintf("%d#%d", d.Get("k8s_id").(int), resp))
return resourceK8sWgRead(ctx, d, m)
}
@ -125,7 +128,7 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
d.Set("wg_id", wg.ID)
if strings.Contains(d.Id(), "#") {
k8sId, err := strconv.Atoi(strings.Split(d.Id(), "#")[1])
k8sId, err := strconv.Atoi(strings.Split(d.Id(), "#")[0])
if err != nil {
return diag.FromErr(err)
}
@ -134,8 +137,7 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
} else {
d.Set("k8s_id", d.Get("k8s_id"))
}
d.SetId(strings.Split(d.Id(), "#")[0])
d.SetId(fmt.Sprintf("%d#%d", d.Get("k8s_id").(int), wg.ID))
flattenWg(d, *wg, workersComputeList)
@ -161,11 +163,10 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err)
}
wgId, _ := strconv.ParseUint(d.Id(), 10, 64)
if newNum := d.Get("num").(int); uint64(newNum) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
}
@ -177,7 +178,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
for i := int(wg.Num) - 1; i >= newNum; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkersGroupID: wg.ID,
WorkerID: wg.DetailedInfo[i].ID,
}
@ -191,7 +192,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
if d.HasChange("cloud_init") {
req := k8s.UpdateWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkersGroupID: wg.ID,
UserData: d.Get("cloud_init").(string),
}
@ -229,16 +230,16 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
func resourceK8sWgSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"k8s_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Type: schema.TypeInt,
Required: true,
// ForceNew: true,
Description: "ID of k8s instance.",
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Type: schema.TypeString,
Required: true,
// ForceNew: true,
Description: "Name of the worker group.",
},
@ -250,18 +251,22 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Type: schema.TypeInt,
Optional: true,
// ForceNew: true,
Default: 1,
Description: "Worker node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1024,
Type: schema.TypeInt,
Optional: true,
//ForceNew: true,
Default: 1024,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Worker node RAM in MB.",
},

@ -91,11 +91,11 @@ func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m in
var err error
if strings.Contains(d.Id(), "#") {
wgId, err = strconv.Atoi(strings.Split(d.Id(), "#")[0])
wgId, err = strconv.Atoi(strings.Split(d.Id(), "#")[1])
if err != nil {
return nil, err
}
k8sId, err = strconv.Atoi(strings.Split(d.Id(), "#")[1])
k8sId, err = strconv.Atoi(strings.Split(d.Id(), "#")[0])
if err != nil {
return nil, err
}
@ -142,7 +142,7 @@ func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData,
func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: uint64(d.Get("wg_id").(int)),
}

@ -33,6 +33,7 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
package kvmvm
import (
"context"
"encoding/json"
"sort"
"strconv"
@ -236,27 +237,35 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
return res
}
func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
func flattenComputeDisksDemo(ctx context.Context, d *schema.ResourceData, disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(disksList))
for _, disk := range disksList {
if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
continue
}
permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permamently from Create or Update context
if !ok {
permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
}
temp := map[string]interface{}{
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"size": disk.SizeMax,
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"size": disk.SizeMax,
"permanently": permanently,
}
res = append(res, temp)
}
sort.Slice(res, func(i, j int) bool {
return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64)
})
@ -264,6 +273,21 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []in
return res
}
// getPermanentlyByDiskID gets permanently value of specific disk (by diskId) from disks current state
func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
disks := d.Get("disks").([]interface{})
for _, diskItem := range disks {
disk := diskItem.(map[string]interface{})
if uint64(disk["disk_id"].(int)) == diskId {
return disk["permanently"].(bool)
}
}
log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
return false
}
func flattenNetwork(interfaces compute.ListInterfaces) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(interfaces))
@ -288,7 +312,7 @@ func findBootDisk(disks compute.ListComputeDisks) *compute.ItemComputeDisk {
return nil
}
func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) error {
func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec compute.RecordCompute) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
@ -322,7 +346,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description)
d.Set("devices", string(devices))
err := d.Set("disks", flattenComputeDisksDemo(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
err := d.Set("disks", flattenComputeDisksDemo(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
if err != nil {
return err
}
@ -658,7 +682,7 @@ func flattenSnapshotList(computeSnapshotUsages *compute.ListSnapShots) []map[str
for _, computeUsage := range computeSnapshotUsages.Data {
temp := map[string]interface{}{
"disks": computeUsage.Disks,
"guid": computeUsage.GUID,
"guid": computeUsage.GUID,
"label": computeUsage.Label,
"timestamp": computeUsage.Timestamp,
}
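
A minimal sketch of the context-key handoff used by flattenComputeDisksDemo above: Create/Update store each disk's permanently flag under a typed DiskKey, and the flatten step reads it back, falling back to state (getPermanentlyByDiskID) when the value is absent. Everything except the DiskKey type itself is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"strconv"
)

// DiskKey mirrors the typed context key defined in resource_compute.go; a named
// type keeps these values from colliding with other context entries.
type DiskKey string

func main() {
	ctx := context.Background()

	// Create/Update side: remember the user's "permanently" flag per disk ID.
	diskID := uint64(777)
	ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskID))), true)

	// Read/flatten side: prefer the context value; in the provider the !ok branch
	// falls back to getPermanentlyByDiskID, i.e. the value stored in state.
	permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(diskID)))).(bool)
	if !ok {
		permanently = false
	}
	fmt.Println(permanently) // true
}
```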

@ -40,6 +40,7 @@ import (
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@ -47,12 +48,16 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// DiskKey is custom string type to set up context Key for Disk ID
type DiskKey string
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
@ -307,11 +312,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
}
@ -541,7 +548,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
}
}
if err = flattenCompute(d, computeRec); err != nil {
if err = flattenCompute(ctx, d, computeRec); err != nil {
return diag.FromErr(err)
}
@ -747,14 +754,23 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("disks") {
deletedDisks := make([]interface{}, 0)
addedDisks := make([]interface{}, 0)
updatedDisks := make([]interface{}, 0)
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
// save permanently in disks based on disk_id to context
for _, diskItemInterface := range d.Get("disks").([]interface{}) {
diskItem := diskItemInterface.(map[string]interface{})
diskId := diskItem["disk_id"].(int)
permanently := diskItem["permanently"].(bool)
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskId)), permanently)
}
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
newConv := newDisks.([]interface{})
for _, el := range oldConv {
if !isContainsDisk(newConv, el) {
if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) {
flag := false
extraDisks := d.Get("extra_disks").(*schema.Set).List()
delDisk := el.(map[string]interface{})
@ -778,10 +794,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
for _, el := range newConv {
if !isContainsDisk(oldConv, el) {
addedDisks = append(addedDisks, el)
} else {
if isChangeDisk(oldConv, el) {
updatedDisks = append(updatedDisks, el)
}
}
if isResizeDisk(oldConv, el) {
resizedDisks = append(resizedDisks, el)
}
if isRenameDisk(oldConv, el) {
renamedDisks = append(renamedDisks, el)
}
}
@ -850,15 +868,17 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
if len(updatedDisks) > 0 {
for _, disk := range updatedDisks {
if len(resizedDisks) > 0 {
for _, disk := range resizedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
@ -875,6 +895,22 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
if len(renamedDisks) > 0 {
for _, disk := range renamedDisks {
diskConv := disk.(map[string]interface{})
req := disks.RenameRequest{
DiskID: uint64(diskConv["disk_id"].(int)),
Name: diskConv["disk_name"].(string),
}
_, err := c.CloudAPI().Disks().Rename(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
if d.HasChange("started") {
@ -1398,7 +1434,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return warnings.Get()
}
func isChangeDisk(els []interface{}, el interface{}) bool {
func isResizeDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
@ -1410,11 +1446,23 @@ func isChangeDisk(els []interface{}, el interface{}) bool {
return false
}
func isRenameDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) &&
elOldConv["disk_name"].(string) != elConv["disk_name"].(string) {
return true
}
}
return false
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) {
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) {
return true
}
}
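
To summarize the new disk-update flow: disks are now matched by disk_id, a changed disk_name schedules a rename, a changed size schedules a resize, and only disks absent from the new list are deleted. The self-contained demo below condenses isContainsDisk / isRenameDisk / isResizeDisk into one illustrative helper; the size comparison is an assumption based on the renamed isResizeDisk.

```go
package main

import "fmt"

// diskSpec is a trimmed-down view of one element of the "disks" block.
type diskSpec struct {
	ID   int
	Name string
	Size int
}

// classify condenses the isContainsDisk/isRenameDisk/isResizeDisk checks:
// match old and new entries by ID, then bucket each new entry as added,
// renamed, or resized. Entries missing from newList would be deleted.
func classify(oldList, newList []diskSpec) (added, renamed, resized []diskSpec) {
	byID := make(map[int]diskSpec, len(oldList))
	for _, d := range oldList {
		byID[d.ID] = d
	}
	for _, d := range newList {
		prev, ok := byID[d.ID]
		if !ok {
			added = append(added, d)
			continue
		}
		if prev.Name != d.Name {
			renamed = append(renamed, d)
		}
		if prev.Size != d.Size {
			resized = append(resized, d)
		}
	}
	return added, renamed, resized
}

func main() {
	oldList := []diskSpec{{ID: 1, Name: "data", Size: 10}}
	newList := []diskSpec{{ID: 1, Name: "data-renamed", Size: 10}, {ID: 2, Name: "extra", Size: 5}}
	added, renamed, resized := classify(oldList, newList)
	fmt.Println(len(added), len(renamed), len(resized)) // 1 1 0
}
```
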
@ -1505,7 +1553,7 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Default: false,
Description: "Disk deletion status",
},
"disk_id": {
@ -1634,10 +1682,13 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Number of CPUs to allocate to this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
Description: "Amount of RAM in MB to allocate to this compute instance.",
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Amount of RAM in MB to allocate to this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
@ -1686,7 +1737,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Required: true,
Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@ -1722,7 +1773,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Required: true,
Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},

@ -82,8 +82,17 @@ func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema {
"ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},

@ -461,7 +461,7 @@ func flattenRgListLb(listLb *rg.ListLB) []map[string]interface{} {
}
func flattenRgListPfw(listPfw *rg.ListPortForwards) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len (listPfw.Data))
res := make([]map[string]interface{}, 0, len(listPfw.Data))
for _, pfw := range listPfw.Data {
temp := map[string]interface{}{
"public_port_end": pfw.PublicPortEnd,
@ -538,10 +538,25 @@ func flattenRgAffinityGroupComputes(list rg.ListAffinityGroupsComputes) []map[st
func flattenRgListGroups(list *rg.ListAffinityGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list.Data))
for groupKey, groupVal := range list.Data {
for _, groupVal := range list.Data {
for label, ag := range groupVal {
temp := map[string]interface{}{
"label": label,
"ids": flattenRgAffinityListGroup(ag),
}
res = append(res, temp)
}
}
return res
}
func flattenRgAffinityListGroup(list rg.ListAffinityGroup) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list))
for _, ag := range list {
temp := map[string]interface{}{
"label": groupKey,
"ids": groupVal,
"id": ag.ID,
"node_id": ag.NodeID,
}
res = append(res, temp)
}
@ -564,10 +579,10 @@ func flattenRGResourceConsumptionList(rg *rg.ListResourceConsumption) []map[stri
res := make([]map[string]interface{}, 0, len(rg.Data))
for _, rc := range rg.Data {
temp := map[string]interface{}{
"consumed": flattenResource(rc.Consumed),
"reserved": flattenResource(rc.Reserved),
"consumed": flattenResource(rc.Consumed),
"reserved": flattenResource(rc.Reserved),
"resource_limits": flattenRgResourceLimits(rc.ResourceLimits),
"rg_id": rc.RGID,
"rg_id": rc.RGID,
}
res = append(res, temp)
}

@ -33,7 +33,10 @@ package k8s
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func nodeMasterDefault() k8s.MasterGroup {
@ -114,9 +117,13 @@ func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
},
"ram": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Type: schema.TypeInt,
Required: true,
//ForceNew: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Node RAM in MB.",
},

@ -36,10 +36,12 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -178,10 +180,14 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
},
"ram": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1024,
Type: schema.TypeInt,
Optional: true,
//ForceNew: true,
Default: 1024,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Worker node RAM in MB.",
},

@ -43,6 +43,7 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -386,10 +387,13 @@ func ResourceCompute() *schema.Resource {
},
"ram": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
Description: "Amount of RAM in MB to allocate to this compute instance.",
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Amount of RAM in MB to allocate to this compute instance.",
},
"image_id": {

@ -0,0 +1,22 @@
package validators
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func DivisibleBy(divisibility int) schema.SchemaValidateFunc {
return func(i interface{}, k string) (warnings []string, errors []error) {
total, ok := i.(int)
if !ok {
errors = append(errors, fmt.Errorf("expected type of %s to be integer", k))
return warnings, errors
}
if total % divisibility != 0 {
errors = append(errors, fmt.Errorf("expected value of %s to be divisible by %d", k, divisibility))
}
return warnings, errors
}
}
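
For context, this is how the new DivisibleBy validator is wired into the schemas elsewhere in this commit: it is composed with the SDK's IntAtLeast via validation.All. The field below is illustrative and would only compile inside the provider module (internal packages); MinRamPerCompute and RAMDivisibility are the constants added above (both 128), so e.g. 4096 passes while 4100 is rejected.

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"

	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)

// ramSchema shows the validation chain applied to every "ram" field in this
// commit: at least MinRamPerCompute MB and divisible by RAMDivisibility.
func ramSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeInt,
		Required: true,
		ValidateFunc: validation.All(
			validation.IntAtLeast(constants.MinRamPerCompute),
			validators.DivisibleBy(constants.RAMDivisibility),
		),
		Description: "Amount of RAM in MB; must be a multiple of 128.",
	}
}
```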

@ -47,7 +47,7 @@ resource "decort_bservice" "b" {
#service availability
#optional parameter
#type - boolean
#used when editing the resource
#used when editing and creating the resource
#default - false
#enable = true
@ -78,7 +78,7 @@ resource "decort_bservice" "b" {
#service start
#optional parameter
#type - boolean
#used when editing the resource
#used when editing and creating the resource
#default - false
#start = false

@ -278,6 +278,11 @@ resource "decort_k8s" "cluster" {
# type - file
oidc_cert = file("ca.crt")
# the destroy command deletes the cluster without the ability to restore it
# optional parameter
# type - boolean
permanently = true
}
output "test_cluster" {

@ -90,6 +90,12 @@ resource "decort_k8s_cp" "cp" {
# Optional parameter
# bool
with_lb = true
# the destroy command deletes the cluster without the ability to restore it
# optional parameter
# type - boolean
permanently = true
}
output "cp_out" {

@ -160,7 +160,7 @@ resource "decort_kvmvm" "comp" {
key = "testkey"
#rule key
#required parameter
#optional parameter
#type - string
value = "testvalue"
}
@ -194,7 +194,7 @@ resource "decort_kvmvm" "comp" {
key = "testkey"
#rule key
#required parameter
#optional parameter
#type - string
value = "testvalue"
}

@ -25,32 +25,95 @@ provider "decort" {
allow_unverified_ssl = true
}
resource "decort_disk" "acl" {
resource "decort_disk" "disk" {
#account id
#required parameter
#type - number
account_id = 88366
#gid
#required parameter
#type - number
gid = 212
#disk name
#required parameter
#type - string
disk_name = "super-disk-re"
#maximum disk size
#required parameter
#type - number
#default value 10
size_max = 20
restore = true
permanently = true
reason = "delete"
iotune {
read_bytes_sec = 0
read_bytes_sec_max = 0
read_iops_sec = 0
read_iops_sec_max = 0
size_iops_sec = 0
total_bytes_sec = 0
total_bytes_sec_max = 0
total_iops_sec = 3000
total_iops_sec_max = 0
write_bytes_sec = 0
write_bytes_sec_max = 0
write_iops_sec = 0
write_iops_sec_max = 0
}
#disk description
#optional parameter
#type - string
#desc = "description"
#disk type
#optional parameter
#type - string
#possible values "D", "B", "T"
#type = "D"
#sep id
#optional parameter
#type - number
#default value 0
#sep_id = 1
#pool name
#optional parameter
#type - string
#pool = 1
#flag to restore the disk
#optional parameter
#type - boolean
#restore = true
#flag for deleting the disk
#optional parameter
#type - boolean
#permanently = true
#reason for deleting the disk
#optional parameter
#type - string
#reason = "delete"
#flag to share the disk
#optional parameter
#type - boolean
#shareable = true
#flag to detach the disk from the machine before deletion
#optional parameter
#type - boolean
#detach = true
#read/write operation limit settings for the disk
#optional parameter
#type - block, nested field type - number
# iotune {
# read_bytes_sec = 0
# read_bytes_sec_max = 0
# read_iops_sec = 0
# read_iops_sec_max = 0
# size_iops_sec = 0
# total_bytes_sec = 0
# total_bytes_sec_max = 0
# total_iops_sec = 3000
# total_iops_sec_max = 0
# write_bytes_sec = 0
# write_bytes_sec_max = 0
# write_iops_sec = 0
# write_iops_sec_max = 0
# }
}
output "test" {
value = decort_disk.acl
value = decort_disk.disk
}

@ -18,12 +18,10 @@
@echo off
FOR %%f IN (bin\*) DO (
if "%%~xf" == ".exe" (
set filename=bin\%%~nf
) else (
set filename=%%f
)
cd /d "%~dp0"
pushd .\bin\
for %%f in (*.exe) do (
set filename=%%~nf
)
for /F "tokens=1,2,3,4 delims=_" %%a in ("%filename%") do (
@ -32,27 +30,16 @@ for /F "tokens=1,2,3,4 delims=_" %%a in ("%filename%") do (
set arch=%%d
)
if "%os%" neq "windows" (
echo Unable to find provider executable, is it moved or renamed?
pause
exit /b
)
set provider_path=%appdata%\terraform.d\plugins\basis\decort\decort\%version%\%os%_%arch%\
if exist %provider_path% (
echo Provider directory already exists, checking for decort provider executable..
dir /b /s /a "%provider_path%" | findstr .>nul || (
copy %filename% %provider_path%\terraform-provider-decort.exe
copy /y %filename%.exe %provider_path%
if errorlevel 1 (
pause
exit /b
)
call :print_success
pause
exit /b
)
echo DECORT provider version %version% is already installed. Exiting.
call :print_success
pause
exit /b
) else (
@ -62,7 +49,7 @@ if exist %provider_path% (
pause
exit /b
)
copy %filename% %provider_path%\terraform-provider-decort.exe
copy %filename%.exe %provider_path%
if errorlevel 1 (
pause
exit /b

@ -47,7 +47,7 @@ install () {
then
echo "Provider directory already exists, checking for decort provider executable.."
if [[ ! "$(ls -A $plugins_dir$provider_path)" ]]; then
cp bin/terraform-provider-decort_$version\_$os\_$arch $plugins_dir$provider_path/terraform-provider-decort
cp bin/terraform-provider-decort_$version\_$os\_$arch $plugins_dir$provider_path/terraform-provider-decort_$version/$os\_$arch
print_info
else
echo "DECORT provider version $version is already installed. Exiting.."
@ -56,7 +56,7 @@ install () {
else
echo "Creating provider directory.."
mkdir -p $plugins_dir/$provider_path
cp bin/terraform-provider-decort_$version\_$os\_$arch $plugins_dir$provider_path/terraform-provider-decort
cp bin/terraform-provider-decort_$version\_$os\_$arch $plugins_dir$provider_path/terraform-provider-decort_$version/$os\_$arch
echo "DECORT provider version $version has been successfully installed"
print_info
fi
