gos_tech_4.4.3 4.2.0
Tim Tkachev 2 years ago
parent 371bb0d90f
commit 0e64974821

@@ -1,10 +1,8 @@
## Version 4.1.1
## Version 4.2.0
## Bug Fixes
- Resolved possible context cancellation issues when awaiting platform response
- resource decort_k8s_wg:
  - The "workersGroupName" label is now ignored
- resource decort_kvmvm:
  - Fixed incorrect "boot_disk_size" state
  - Fixed the "cloud_init" field value disappearing from state
  - Set the "started" default value to true
## Features
- Added the "decort_k8s_cp" resource:
  - Creates a k8s cluster and manages only its control plane, so it carries no worker node info
  - Use decort_k8s_wg together with decort_k8s_cp to add/delete/manage worker nodes (see the sketch below)
- Added the "ignore_k8s" field to the "decort_kvmvm_list" data source:
  - Set it to true to list only computes not associated with any k8s cluster
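
A minimal sketch of how the two resources pair up (all IDs are placeholders, and the decort_k8s_wg arguments shown are assumed from the existing worker group resource):

```terraform
resource "decort_k8s_cp" "cluster" {
  name           = "k8s-cp"
  rg_id          = 1387 # placeholder resource group ID
  k8sci_id       = 55   # placeholder k8s catalog item ID
  network_plugin = "flannel"
}

# Worker nodes live in their own resource and reference the control plane.
resource "decort_k8s_wg" "workers" {
  k8s_id = decort_k8s_cp.cluster.k8s_id # computed cluster ID exported by decort_k8s_cp
  name   = "workers"
  num    = 2 # assumed worker count argument
}
```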

@@ -8,7 +8,7 @@ ZIPDIR = ./zip
BINARY=${NAME}.exe
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.1.1
VERSION=4.2.0
#OS_ARCH=darwin_amd64
OS_ARCH=windows_amd64
#OS_ARCH=linux_amd64

@@ -58,6 +58,7 @@ func newResourcesMap() map[string]*schema.Resource {
"decort_pfw": pfw.ResourcePfw(),
"decort_k8s": k8s.ResourceK8s(),
"decort_k8s_wg": k8s.ResourceK8sWg(),
"decort_k8s_cp": k8s.ResourceK8sCP(),
"decort_snapshot": snapshot.ResourceSnapshot(),
"decort_account": account.ResourceAccount(),
"decort_bservice": bservice.ResourceBasicService(),

@@ -303,6 +303,43 @@ func flattenK8sList(d *schema.ResourceData, k8sItems k8s.ListK8SClusters) {
d.Set("items", flattenK8sItems(k8sItems))
}
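// flattenResourceK8sCP writes a control-plane cluster record into the
// decort_k8s_cp resource state.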
func flattenResourceK8sCP(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute) {
d.Set("acl", flattenAcl(k8s.ACL))
d.Set("account_id", k8s.AccountID)
d.Set("account_name", k8s.AccountName)
d.Set("k8sci_id", k8s.CIID)
d.Set("bservice_id", k8s.BServiceID)
d.Set("created_by", k8s.CreatedBy)
d.Set("created_time", k8s.CreatedTime)
d.Set("deleted_by", k8s.DeletedBy)
d.Set("deleted_time", k8s.DeletedTime)
d.Set("k8s_ci_name", k8s.K8CIName)
d.Set("with_lb", k8s.LBID != 0)
d.Set("lb_id", k8s.LBID)
d.Set("k8s_id", k8s.ID)
d.Set("name", k8s.Name)
d.Set("rg_id", k8s.RGID)
d.Set("rg_name", k8s.RGName)
d.Set("status", k8s.Status)
d.Set("tech_status", k8s.TechStatus)
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
d.Set("network_plugin", k8s.NetworkPlugin)
flattenCPParams(d, k8s.K8SGroups.Masters, masters)
}
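// flattenCPParams mirrors the master group parameters (cpu, ram, disk, num,
// detailed info) into the resource state.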
func flattenCPParams(d *schema.ResourceData, mastersGroup k8s.MasterGroup, masters []compute.RecordCompute) {
d.Set("cpu", mastersGroup.CPU)
d.Set("detailed_info", flattenDetailedInfo(mastersGroup.DetailedInfo, masters))
d.Set("disk", mastersGroup.Disk)
d.Set("master_id", mastersGroup.ID)
d.Set("master_name", mastersGroup.Name)
d.Set("num", mastersGroup.Num)
d.Set("ram", mastersGroup.RAM)
d.Set("master_id", mastersGroup.ID)
}
func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute, workers []compute.RecordCompute) {
wg_name := k8s.K8SGroups.Workers[0].Name

@@ -34,6 +34,7 @@ package k8s
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func nodeMasterDefault() K8sNodeRecord {
@@ -72,7 +73,8 @@ func mastersSchemaMake() map[string]*schema.Schema {
masters["num"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
ValidateFunc: validation.IntInSlice([]int{1, 3}),
Description: "Number of nodes to create. Can be either 1 or 3",
}
masters["sep_id"] = &schema.Schema{
Type: schema.TypeInt,

@@ -410,13 +410,21 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
}
updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
doBasicUpdate := false
if d.HasChange("name") {
req := k8s.UpdateRequest{
K8SID: cluster.ID,
Name: d.Get("name").(string),
updateReq.Name = d.Get("name").(string)
doBasicUpdate = true
}
if d.HasChange("desc") {
updateReq.Description = d.Get("desc").(string)
doBasicUpdate = true
}
_, err := c.CloudAPI().K8S().Update(ctx, req)
if doBasicUpdate {
_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
if err != nil {
return diag.FromErr(err)
}

@@ -0,0 +1,715 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
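// resourceK8sCPCreate creates a k8s cluster with a control plane only: it
// validates the referenced RG, k8s catalog item and external network, submits
// the create request, polls the async task, and finally removes the temporary
// worker group so that workers stay under decort_k8s_wg control.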
func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneCreate: called with name %s, rg %d", d.Get("name").(string), d.Get("rg_id").(int))
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveK8sciID, err := existK8sCIID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveK8sciID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
}
if _, ok := d.GetOk("extnet_id"); ok {
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
}
c := m.(*controller.ControllerCfg)
createReq := k8s.CreateRequest{}
createReq.Name = d.Get("name").(string)
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
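// The create API expects a worker group name, so a throwaway group named
// "temp" is requested here; it is deleted below once the cluster is ready,
// since worker nodes are managed by the separate decort_k8s_wg resource.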
createReq.WorkerGroupName = "temp"
createReq.NetworkPlugin = d.Get("network_plugin").(string)
if num, ok := d.GetOk("num"); ok {
createReq.MasterNum = uint(num.(int))
} else {
createReq.MasterNum = 1
}
if cpu, ok := d.GetOk("cpu"); ok {
createReq.MasterCPU = uint(cpu.(int))
} else {
createReq.MasterCPU = 2
}
if ram, ok := d.GetOk("ram"); ok {
createReq.MasterRAM = uint(ram.(int))
} else {
createReq.MasterRAM = 2048
}
if disk, ok := d.GetOk("disk"); ok {
createReq.MasterDisk = uint(disk.(int))
} else {
createReq.MasterDisk = 0
}
if sepId, ok := d.GetOk("sep_id"); ok {
createReq.MasterSEPID = uint64(sepId.(int))
}
if sepPool, ok := d.GetOk("sep_pool"); ok {
createReq.MasterSEPPool = sepPool.(string)
}
if withLB, ok := d.GetOk("with_lb"); ok {
createReq.WithLB = withLB.(bool)
} else {
createReq.WithLB = true
}
if extNet, ok := d.GetOk("extnet_id"); ok {
createReq.ExtNetID = uint64(extNet.(int))
} else {
createReq.ExtNetID = 0
}
if desc, ok := d.GetOk("desc"); ok {
createReq.Description = desc.(string)
}
resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
if err != nil {
return diag.FromErr(err)
}
taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
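// Poll the asynchronous platform task every 10 seconds until it completes;
// on success, task.Result carries the ID of the new cluster.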
for {
task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceK8sControlPlaneCreate: instance creating - %s", task.Stage)
if task.Completed {
if task.Error != "" {
return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
}
d.SetId(strconv.Itoa(int(task.Result)))
break
}
time.Sleep(time.Second * 10)
}
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
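// Remove the temporary worker group created above; worker groups belong to
// the decort_k8s_wg resource.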
delWGReq := k8s.WorkersGroupDeleteRequest{
K8SID: cluster.ID,
WorkersGroupID: cluster.K8SGroups.Workers[0].ID,
}
_, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, delWGReq)
if err != nil {
return diag.FromErr(err)
}
return resourceK8sCPRead(ctx, d, m)
}
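// resourceK8sCPRead refreshes the resource state, restores and re-enables a
// deleted cluster, optionally starts a stopped one, and warns about worker
// groups that are not managed by this resource.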
func resourceK8sCPRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
hasChanged := false
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCPCreate(ctx, d, m)
case status.Enabling:
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
}
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
curK8s := k8s.ItemK8SCluster{}
for _, k8sCluster := range k8sList {
if k8sCluster.ID == cluster.ID {
curK8s = k8sCluster
}
}
if curK8s.ID == 0 {
return diag.Errorf("Cluster with id %d not found", cluster.ID)
}
d.Set("vins_id", curK8s.VINSID)
masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
if err != nil {
return diag.FromErr(err)
}
masterComputeList = append(masterComputeList, *compute)
}
var warnings dc.Warnings
if _, ok := d.GetOk("k8s_id"); !ok {
for _, worker := range cluster.K8SGroups.Workers {
err := fmt.Errorf("Found worker-group with ID %d. Make sure to import it to decort_k8s_wg resource if you wish to manage it", worker.ID)
warnings.Add(err)
}
}
flattenResourceK8sCP(d, *cluster, masterComputeList)
lbGetReq := lb.GetRequest{
LBID: cluster.LBID,
}
lb, err := c.CloudAPI().LB().Get(ctx, lbGetReq)
if err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
kubeconfigReq := k8s.GetConfigRequest{
K8SID: cluster.ID,
}
kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, kubeconfigReq)
if err != nil {
log.Warnf("could not get kubeconfig: %v", err)
}
d.Set("kubeconfig", kubeconfig)
return warnings.Get()
}
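// resourceK8sCPUpdate applies name/description changes, start/stop requests
// and master node scale-down.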
func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneUpdate: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveK8sciID, err := existK8sCIID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveK8sciID {
return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
}
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
hasChanged := false
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCPCreate(ctx, d, m)
case status.Enabling:
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
}
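// Batch name and description changes into a single Update call instead of
// issuing one request per changed field.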
updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
doBasicUpdate := false
if d.HasChange("name") {
updateReq.Name = d.Get("name").(string)
doBasicUpdate = true
}
if d.HasChange("desc") {
updateReq.Description = d.Get("desc").(string)
doBasicUpdate = true
}
if doBasicUpdate {
_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("start") {
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
} else {
if cluster.TechStatus == "STARTED" {
req := k8s.StopRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Stop(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
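// Masters can only be scaled down in place: when "num" shrinks, the
// highest-indexed master nodes are removed from the group.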
if d.HasChange("num") {
oldVal, newVal := d.GetChange("num")
if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
}
req := k8s.DeleteMasterFromGroupRequest{
K8SID: cluster.ID,
MasterGroupID: cluster.K8SGroups.Masters.ID,
MasterIDs: ids,
}
_, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
return nil
}
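// resourceK8sCPDelete deletes the cluster permanently, bypassing the
// recycle bin.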
func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := k8s.DeleteRequest{
K8SID: cluster.ID,
Permanently: true,
}
_, err = c.CloudAPI().K8S().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceK8sCPSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of the cluster.",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Resource group ID that this instance belongs to.",
},
"k8sci_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.",
},
"network_plugin": {
Type: schema.TypeString,
Required: true,
Description: "Network plugin to be used",
ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
},
"num": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntInSlice([]int{1, 3}),
Description: "Number of VMs to create. Can be either 1 or 3",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node boot disk size in GB.",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Storage Endpoint ID",
},
"sep_pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Storage Endpoint Pool",
},
"with_lb": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Create k8s with load balancer if true.",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "Text description of this instance.",
},
"start": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Start k8s cluster.",
},
"detailed_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: detailedInfoSchemaMake(),
},
},
"master_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Master group ID.",
},
"master_name": {
Type: schema.TypeString,
Computed: true,
Description: "Master group name.",
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: aclGroupSchemaMake(),
},
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"bservice_id": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"k8s_ci_name": {
Type: schema.TypeString,
Computed: true,
},
"lb_id": {
Type: schema.TypeInt,
Computed: true,
},
"k8s_id": {
Type: schema.TypeInt,
Computed: true,
},
"lb_ip": {
Type: schema.TypeString,
Computed: true,
Description: "IP address of default load balancer.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"kubeconfig": {
Type: schema.TypeString,
Computed: true,
Description: "Kubeconfig for cluster access.",
},
"vins_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of default vins for this instace.",
},
}
}
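// ResourceK8sCP wires the decort_k8s_cp resource together: CRUD handlers,
// passthrough import and per-operation timeouts.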
func ResourceK8sCP() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceK8sCPCreate,
ReadContext: resourceK8sCPRead,
UpdateContext: resourceK8sCPUpdate,
DeleteContext: resourceK8sCPDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout30m,
Read: &constants.Timeout600s,
Update: &constants.Timeout600s,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceK8sCPSchemaMake(),
}
}

@@ -47,9 +47,16 @@ func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m in
return diag.FromErr(err)
}
result := computeList
if d.Get("ignore_k8s").(bool) {
// filter out computes whose automatically generated names (like "s234-g2134-c1") mark them as k8s nodes
result = matchComputes(computeList)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenComputeList(computeList))
d.Set("items", flattenComputeList(result))
return nil
}
@@ -325,7 +332,7 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
}
}
func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"includedeleted": {
Type: schema.TypeBool,
@@ -339,7 +346,12 @@ func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Optional: true,
},
"ignore_k8s": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If set to true, ignores any VMs associated with any k8s cluster",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -362,6 +374,6 @@ func DataSourceComputeList() *schema.Resource {
Default: &constants.Timeout60s,
},
Schema: dataSourceCompputeListSchemaMake(),
Schema: dataSourceComputeListSchemaMake(),
}
}

@@ -34,6 +34,7 @@ package kvmvm
import (
"context"
"regexp"
"strconv"
log "github.com/sirupsen/logrus"
@@ -43,6 +44,16 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
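// matchComputes filters out computes whose names match the pattern used for
// auto-generated k8s node names: three dash-separated groups of letters
// followed by digits, e.g. "s234-g2134-c1".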
func matchComputes(computeList compute.ListComputes) compute.ListComputes {
matched, _ := regexp.Compile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+")
result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
res := matched.Match([]byte(ic.Name))
return !res
})
return result
}
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg)

@@ -43,6 +43,11 @@ data "decort_kvmvm_list" "compute_list" {
#type - number
#if not set, all available data is returned
size = 1
# Exclude VMs that belong to any k8s cluster from the output (when set to true)
# optional parameter
# bool (default = false)
ignore_k8s = true
}
output "output" {

@@ -0,0 +1,97 @@
#Uncomment this code,
#and adjust the version and path as needed,
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
source = "basis/decort/decort"
version = "<VERSION>"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
oauth2_url = "https://sso.digitalenergy.online"
controller_url = "https://mr4.digitalenergy.online"
app_id = ""
app_secret = ""
}
resource "decort_k8s_cp" "cp" {
# Cluster name
# Required parameter
# string
name = "k8s-cp"
# K8sCI ID
# Required parameter
# int
k8sci_id = 55
# Network plugin (flannel, weavenet or calico)
# Required parameter
# string
network_plugin = "flannel"
# Resource group ID
# Required parameter
# int
rg_id = 1387
# Master node CPU count
# Optional parameter
# int
cpu = 2
# Master node RAM in MB
# Optional parameter
# int
ram = 2048
# Number of master node VMs (1 or 3)
# Optional parameter
# int
num = 1
# Master node boot disk size in GB
# Optional parameter
# int
disk = 10
# Cluster description
# Optional parameter
# string
desc = "<DESCRIPTION>"
# External network ID
# Optional parameter
# int
extnet_id = 13
# Storage Endpoint ID
# Optional parameter
# int
sep_id = 0
# SEP pool
# Optional parameter
# string
sep_pool = "pool"
# Start/stop the cluster
# Optional parameter
# bool
start = true
# Create the cluster with or without a load balancer
# Optional parameter
# bool
with_lb = true
}
output "cp_out" {
value = decort_k8s_cp.cp
}