Compare commits

5 commits

| Author | SHA1 | Date |
|---|---|---|
| | 6365f63fc1 | |
| | 85ce76564f | |
| | 928481d26f | |
| | 0e64974821 | |
| | 371bb0d90f | |
CHANGELOG.md (13 changes)
```diff
@@ -1,13 +1,4 @@
-## Version 4.1.0
-
-## Features
-
-- Added cloudapi/flipgroup functionality to provider:
-  - decort_flipgroup data source - read info about created flipgroup
-  - decort_flipgroup_list data source - read info about all created flipgroups
-  - decort_flipgroup resource - manage flipgroup instance
+## Version 4.2.3
 
 ## Bug Fixes
-- Increased resource_vins timeouts
+- Made "ipcidr" field in decort_vins resource optional
+- Automated compute start/stop when managing networks
+- Removed reading of default generated label "workersGroupName=" in resource_k8s, resource_k8s_wg in tfstate
+- Updated samples for flipgroups, image, pfw, k8s resources and data sources
```
Makefile (2 changes)
```diff
@@ -8,7 +8,7 @@ ZIPDIR = ./zip
 BINARY=${NAME}.exe
 WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
 MAINPATH = ./cmd/decort/
-VERSION=4.1.0
+VERSION=4.2.3
 #OS_ARCH=darwin_amd64
 OS_ARCH=windows_amd64
 #OS_ARCH=linux_amd64
```
go.mod (2 changes)
```diff
@@ -9,7 +9,7 @@ require (
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
 	github.com/sirupsen/logrus v1.9.0
 	golang.org/x/net v0.5.0
-	repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3
+	repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7
 )
 
 require (
```
go.sum (4 changes)
```diff
@@ -332,5 +332,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3 h1:jrBl90lRfp34bE3m30N3mYIDTSlaPySuo+pE7bK4eOI=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7 h1:1lHhvOsMX5iFQ4z2qmVT7cORbCr+hTeEH9Lk1E2liYE=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=
```
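Both go.mod and go.sum boil down to a single dependency change: the DECORT Go SDK moves from v1.4.3 to v1.4.7. To reproduce the bump locally one would run `go get repository.basistech.ru/BASIS/decort-golang-sdk@v1.4.7` followed by `go mod tidy`.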
```diff
@@ -158,7 +158,6 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
 		Username:      ret_config.legacy_user,
 		Password:      ret_config.legacy_password,
 		DecortURL:     ret_config.controller_url,
-		Retries:       0,
 		SSLSkipVerify: allow_unverified_ssl,
 	}
 
```
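Dropping the explicit `Retries: 0` defers to whatever retry behaviour the SDK's client configuration uses by default; presumably the v1.4.7 SDK handles this on its own, but that is an inference from the diff, not something it states.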
```diff
@@ -58,6 +58,7 @@ func newResourcesMap() map[string]*schema.Resource {
 		"decort_pfw":      pfw.ResourcePfw(),
 		"decort_k8s":      k8s.ResourceK8s(),
 		"decort_k8s_wg":   k8s.ResourceK8sWg(),
+		"decort_k8s_cp":   k8s.ResourceK8sCP(),
 		"decort_snapshot": snapshot.ResourceSnapshot(),
 		"decort_account":  account.ResourceAccount(),
 		"decort_bservice": bservice.ResourceBasicService(),
```
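For context, this map is the only provider-side wiring a new resource type needs: the key is the Terraform type name, the value its `*schema.Resource` constructor. A minimal sketch of the pattern (the real `newResourcesMap` is much longer; existing entries elided):

```go
// Sketch: how a resource type is registered with the provider.
func newResourcesMap() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		// ...existing entries elided...
		"decort_k8s_cp": k8s.ResourceK8sCP(), // added by this changeset
	}
}
```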
```diff
@@ -303,6 +303,43 @@ func flattenK8sList(d *schema.ResourceData, k8sItems k8s.ListK8SClusters) {
 	d.Set("items", flattenK8sItems(k8sItems))
 }
 
+func flattenResourceK8sCP(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute) {
+	d.Set("acl", flattenAcl(k8s.ACL))
+	d.Set("account_id", k8s.AccountID)
+	d.Set("account_name", k8s.AccountName)
+	d.Set("k8sci_id", k8s.CIID)
+	d.Set("bservice_id", k8s.BServiceID)
+	d.Set("created_by", k8s.CreatedBy)
+	d.Set("created_time", k8s.CreatedTime)
+	d.Set("deleted_by", k8s.DeletedBy)
+	d.Set("deleted_time", k8s.DeletedTime)
+	d.Set("k8s_ci_name", k8s.K8CIName)
+	d.Set("with_lb", k8s.LBID != 0)
+	d.Set("lb_id", k8s.LBID)
+	d.Set("k8s_id", k8s.ID)
+	d.Set("name", k8s.Name)
+	d.Set("rg_id", k8s.RGID)
+	d.Set("rg_name", k8s.RGName)
+	d.Set("status", k8s.Status)
+	d.Set("tech_status", k8s.TechStatus)
+	d.Set("updated_by", k8s.UpdatedBy)
+	d.Set("updated_time", k8s.UpdatedTime)
+	d.Set("network_plugin", k8s.NetworkPlugin)
+
+	flattenCPParams(d, k8s.K8SGroups.Masters, masters)
+}
+
+func flattenCPParams(d *schema.ResourceData, mastersGroup k8s.MasterGroup, masters []compute.RecordCompute) {
+	d.Set("cpu", mastersGroup.CPU)
+	d.Set("detailed_info", flattenDetailedInfo(mastersGroup.DetailedInfo, masters))
+	d.Set("disk", mastersGroup.Disk)
+	d.Set("master_id", mastersGroup.ID)
+	d.Set("master_name", mastersGroup.Name)
+	d.Set("num", mastersGroup.Num)
+	d.Set("ram", mastersGroup.RAM)
+	d.Set("master_id", mastersGroup.ID)
+}
+
 func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute, workers []compute.RecordCompute) {
 	wg_name := k8s.K8SGroups.Workers[0].Name
 
```
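Note that `flattenCPParams` sets `master_id` twice; the second `d.Set("master_id", mastersGroup.ID)` is redundant, though harmless.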
```diff
@@ -335,7 +372,7 @@ func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []com
 func flattenWg(d *schema.ResourceData, wg k8s.ItemK8SGroup, computes []compute.RecordCompute) {
 	labels := make([]string, 0)
 	for _, label := range wg.Labels {
-		if strings.HasPrefix(label, "workerGroupName") {
+		if strings.HasPrefix(label, "workersGroupName") {
 			continue
 		}
 
```
```diff
@@ -34,6 +34,7 @@ package k8s
 
 import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )
 
 func nodeMasterDefault() K8sNodeRecord {
```
```diff
@@ -70,9 +71,10 @@ func parseDefaultNode(nodeList []interface{}) K8sNodeRecord {
 func mastersSchemaMake() map[string]*schema.Schema {
 	masters := masterGroupSchemaMake()
 	masters["num"] = &schema.Schema{
 		Type:     schema.TypeInt,
 		Required: true,
-		Description: "Number of nodes to create.",
+		ValidateFunc: validation.IntInSlice([]int{1, 3}),
+		Description:  "Number of nodes to create. Can be either 1 or 3",
 	}
 	masters["sep_id"] = &schema.Schema{
 		Type: schema.TypeInt,
```
|||||||
@@ -143,11 +143,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if withLB, ok := d.GetOk("with_lb"); ok {
|
createReq.WithLB = d.Get("with_lb").(bool)
|
||||||
createReq.WithLB = withLB.(bool)
|
|
||||||
} else {
|
|
||||||
createReq.WithLB = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if extNet, ok := d.GetOk("extnet_id"); ok {
|
if extNet, ok := d.GetOk("extnet_id"); ok {
|
||||||
createReq.ExtNetID = uint64(extNet.(int))
|
createReq.ExtNetID = uint64(extNet.(int))
|
||||||
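The rewrite above fixes a classic SDK pitfall: `GetOk` reports `ok == false` whenever the value equals the type's zero value, so an explicit `with_lb = false` was indistinguishable from an unset field and fell into the `else` branch that forced `true`. A hedged sketch of the distinction (field name as in the diff; assumes the usual terraform-plugin-sdk `schema` import):

```go
// Minimal sketch (hypothetical helper): why d.Get beats d.GetOk for bools.
// With schema {Type: TypeBool, Optional: true, Default: true}:
//   with_lb = false -> GetOk yields (false, ok=false) // looks unset: the old bug
//   with_lb = true  -> GetOk yields (true,  ok=true)
//   (unset)         -> GetOk yields (true,  ok=true)  // schema Default applied
func readWithLB(d *schema.ResourceData) bool {
	return d.Get("with_lb").(bool) // relies on the schema Default for unset
}
```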
```diff
@@ -410,13 +406,21 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
 		}
 	}
 
-	if d.HasChange("name") {
-		req := k8s.UpdateRequest{
-			K8SID: cluster.ID,
-			Name:  d.Get("name").(string),
-		}
-
-		_, err := c.CloudAPI().K8S().Update(ctx, req)
+	updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
+	doBasicUpdate := false
+
+	if d.HasChange("name") {
+		updateReq.Name = d.Get("name").(string)
+		doBasicUpdate = true
+	}
+
+	if d.HasChange("desc") {
+		updateReq.Description = d.Get("desc").(string)
+		doBasicUpdate = true
+	}
+
+	if doBasicUpdate {
+		_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
 		if err != nil {
 			return diag.FromErr(err)
 		}
```
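The net effect: `name` changes and the newly supported `desc` changes are collected into one `k8s.UpdateRequest` and sent in a single `Update` call, guarded by `doBasicUpdate`, instead of one API round-trip per field.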
internal/service/cloudapi/k8s/resource_k8s_cp.go (new file, 711 lines)
@@ -0,0 +1,711 @@
```go
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package k8s

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
	log "github.com/sirupsen/logrus"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceK8sControlPlaneCreate: called with name %s, rg %d", d.Get("name").(string), d.Get("rg_id").(int))

	haveRGID, err := existRGID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	if !haveRGID {
		return diag.Errorf("resourceK8sCreate: can't create k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
	}

	haveK8sciID, err := existK8sCIID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	if !haveK8sciID {
		return diag.Errorf("resourceK8sCreate: can't create k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
	}

	if _, ok := d.GetOk("extnet_id"); ok {
		haveExtNetID, err := existExtNetID(ctx, d, m)
		if err != nil {
			return diag.FromErr(err)
		}

		if !haveExtNetID {
			return diag.Errorf("resourceK8sCreate: can't create k8s cluster because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
		}
	}

	c := m.(*controller.ControllerCfg)
	createReq := k8s.CreateRequest{}

	createReq.Name = d.Get("name").(string)
	createReq.RGID = uint64(d.Get("rg_id").(int))
	createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
	createReq.WorkerGroupName = "temp"
	createReq.NetworkPlugin = d.Get("network_plugin").(string)

	if num, ok := d.GetOk("num"); ok {
		createReq.MasterNum = uint(num.(int))
	} else {
		createReq.MasterNum = 1
	}

	if cpu, ok := d.GetOk("cpu"); ok {
		createReq.MasterCPU = uint(cpu.(int))
	} else {
		createReq.MasterCPU = 2
	}

	if ram, ok := d.GetOk("ram"); ok {
		createReq.MasterRAM = uint(ram.(int))
	} else {
		createReq.MasterRAM = 2048
	}

	if disk, ok := d.GetOk("disk"); ok {
		createReq.MasterDisk = uint(disk.(int))
	} else {
		createReq.MasterDisk = 0
	}

	if sepId, ok := d.GetOk("sep_id"); ok {
		createReq.MasterSEPID = uint64(sepId.(int))
	}

	if sepPool, ok := d.GetOk("sep_pool"); ok {
		createReq.MasterSEPPool = sepPool.(string)
	}

	createReq.WithLB = d.Get("with_lb").(bool)

	if extNet, ok := d.GetOk("extnet_id"); ok {
		createReq.ExtNetID = uint64(extNet.(int))
	} else {
		createReq.ExtNetID = 0
	}

	if desc, ok := d.GetOk("desc"); ok {
		createReq.Description = desc.(string)
	}

	resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
	if err != nil {
		return diag.FromErr(err)
	}

	taskReq := tasks.GetRequest{
		AuditID: strings.Trim(resp, `"`),
	}

	for {
		task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
		if err != nil {
			return diag.FromErr(err)
		}

		log.Debugf("resourceK8sControlPlaneCreate: instance creating - %s", task.Stage)

		if task.Completed {
			if task.Error != "" {
				return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
			}

			d.SetId(strconv.Itoa(int(task.Result)))
			break
		}

		time.Sleep(time.Second * 10)
	}

	cluster, err := utilityK8sCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	delWGReq := k8s.WorkersGroupDeleteRequest{
		K8SID:          cluster.ID,
		WorkersGroupID: cluster.K8SGroups.Workers[0].ID,
	}

	_, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, delWGReq)
	if err != nil {
		return diag.FromErr(err)
	}

	return resourceK8sCPRead(ctx, d, m)
}

func resourceK8sCPRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	cluster, err := utilityK8sCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	c := m.(*controller.ControllerCfg)

	hasChanged := false

	switch cluster.Status {
	case status.Modeled:
		return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
	case status.Creating:
	case status.Created:
	case status.Deleting:
	case status.Deleted:
		id, _ := strconv.ParseUint(d.Id(), 10, 64)
		restoreReq := k8s.RestoreRequest{
			K8SID: id,
		}

		_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
		if err != nil {
			return diag.FromErr(err)
		}

		enableReq := k8s.DisableEnableRequest{
			K8SID: id,
		}

		_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
		if err != nil {
			return diag.FromErr(err)
		}

		hasChanged = true
	case status.Destroying:
		return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
	case status.Destroyed:
		d.SetId("")
		return resourceK8sCreate(ctx, d, m)
	case status.Enabling:
	case status.Enabled:
	case status.Disabling:
	case status.Disabled:
		log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
	case status.Restoring:
	}

	if hasChanged {
		cluster, err = utilityK8sCheckPresence(ctx, d, m)
		if cluster == nil {
			d.SetId("")
			if err != nil {
				return diag.FromErr(err)
			}
			return nil
		}
	}

	if d.Get("start").(bool) {
		if cluster.TechStatus == "STOPPED" {
			req := k8s.StartRequest{
				K8SID: cluster.ID,
			}
			_, err := c.CloudAPI().K8S().Start(ctx, req)
			if err != nil {
				return diag.FromErr(err)
			}
		}
	}

	k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	curK8s := k8s.ItemK8SCluster{}
	for _, k8sCluster := range k8sList {
		if k8sCluster.ID == cluster.ID {
			curK8s = k8sCluster
		}
	}
	if curK8s.ID == 0 {
		return diag.Errorf("Cluster with id %d not found", cluster.ID)
	}

	d.Set("vins_id", curK8s.VINSID)

	masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
	for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
		compute, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
		if err != nil {
			return diag.FromErr(err)
		}
		masterComputeList = append(masterComputeList, *compute)
	}

	var warnings dc.Warnings

	if _, ok := d.GetOk("k8s_id"); !ok {
		for _, worker := range cluster.K8SGroups.Workers {
			err := fmt.Errorf("Found worker-group with ID %d. Make sure to import it to decort_k8s_wg resource if you wish to manage it", worker.ID)
			warnings.Add(err)
		}
	}

	flattenResourceK8sCP(d, *cluster, masterComputeList)

	lbGetReq := lb.GetRequest{
		LBID: cluster.LBID,
	}

	lb, err := c.CloudAPI().LB().Get(ctx, lbGetReq)
	if err != nil {
		return diag.FromErr(err)
	}

	d.Set("extnet_id", lb.ExtNetID)
	d.Set("lb_ip", lb.PrimaryNode.FrontendIP)

	kubeconfigReq := k8s.GetConfigRequest{
		K8SID: cluster.ID,
	}

	kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, kubeconfigReq)
	if err != nil {
		log.Warnf("could not get kubeconfig: %v", err)
	}

	d.Set("kubeconfig", kubeconfig)

	return warnings.Get()
}

func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceK8sControlPlaneUpdate: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))

	c := m.(*controller.ControllerCfg)

	haveRGID, err := existRGID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	if !haveRGID {
		return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
	}

	haveK8sciID, err := existK8sCIID(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	if !haveK8sciID {
		return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
	}

	cluster, err := utilityK8sCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	hasChanged := false

	switch cluster.Status {
	case status.Modeled:
		return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
	case status.Creating:
	case status.Created:
	case status.Deleting:
	case status.Deleted:
		id, _ := strconv.ParseUint(d.Id(), 10, 64)
		restoreReq := k8s.RestoreRequest{
			K8SID: id,
		}

		_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
		if err != nil {
			return diag.FromErr(err)
		}

		enableReq := k8s.DisableEnableRequest{
			K8SID: id,
		}

		_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
		if err != nil {
			return diag.FromErr(err)
		}

		hasChanged = true
	case status.Destroying:
		return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
	case status.Destroyed:
		d.SetId("")
		return resourceK8sCreate(ctx, d, m)
	case status.Enabling:
	case status.Enabled:
	case status.Disabling:
	case status.Disabled:
		log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
	case status.Restoring:
	}

	if hasChanged {
		cluster, err = utilityK8sCheckPresence(ctx, d, m)
		if cluster == nil {
			d.SetId("")
			if err != nil {
				return diag.FromErr(err)
			}
			return nil
		}
	}

	updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
	doBasicUpdate := false

	if d.HasChange("name") {
		updateReq.Name = d.Get("name").(string)
		doBasicUpdate = true
	}

	if d.HasChange("desc") {
		updateReq.Description = d.Get("desc").(string)
		doBasicUpdate = true
	}

	if doBasicUpdate {
		_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
		if err != nil {
			return diag.FromErr(err)
		}
	}

	if d.HasChange("start") {
		if d.Get("start").(bool) {
			if cluster.TechStatus == "STOPPED" {
				req := k8s.StartRequest{
					K8SID: cluster.ID,
				}
				_, err := c.CloudAPI().K8S().Start(ctx, req)
				if err != nil {
					return diag.FromErr(err)
				}
			}
		} else {
			if cluster.TechStatus == "STARTED" {
				req := k8s.StopRequest{
					K8SID: cluster.ID,
				}
				_, err := c.CloudAPI().K8S().Stop(ctx, req)
				if err != nil {
					return diag.FromErr(err)
				}
			}
		}
	}

	if d.HasChange("num") {
		oldVal, newVal := d.GetChange("num")

		if oldVal.(int) > newVal.(int) {
			ids := make([]string, 0)
			for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
				id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
				ids = append(ids, strconv.FormatUint(id, 10))
			}

			req := k8s.DeleteMasterFromGroupRequest{
				K8SID:         cluster.ID,
				MasterGroupID: cluster.K8SGroups.Masters.ID,
				MasterIDs:     ids,
			}

			_, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)
			if err != nil {
				return diag.FromErr(err)
			}
		}
	}

	return nil
}

func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceK8sControlPlaneDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))

	cluster, err := utilityK8sCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	c := m.(*controller.ControllerCfg)
	req := k8s.DeleteRequest{
		K8SID:       cluster.ID,
		Permanently: true,
	}

	_, err = c.CloudAPI().K8S().Delete(ctx, req)
	if err != nil {
		return diag.FromErr(err)
	}

	return nil
}

func resourceK8sCPSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "Name of the cluster.",
		},
		"rg_id": {
			Type:        schema.TypeInt,
			Required:    true,
			ForceNew:    true,
			Description: "Resource group ID that this instance belongs to.",
		},
		"k8sci_id": {
			Type:        schema.TypeInt,
			Required:    true,
			ForceNew:    true,
			Description: "ID of the k8s catalog item to base this instance on.",
		},
		"network_plugin": {
			Type:         schema.TypeString,
			Required:     true,
			Description:  "Network plugin to be used",
			ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
		},
		"num": {
			Type:         schema.TypeInt,
			Optional:     true,
			Computed:     true,
			ValidateFunc: validation.IntInSlice([]int{1, 3}),
			Description:  "Number of VMs to create. Can be either 1 or 3",
		},
		"cpu": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			Description: "Node CPU count.",
		},
		"ram": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			Description: "Node RAM in MB.",
		},
		"disk": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			Description: "Node boot disk size in GB.",
		},
		"sep_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			Description: "Storage Endpoint ID",
		},
		"sep_pool": {
			Type:        schema.TypeString,
			Optional:    true,
			Computed:    true,
			Description: "Storage Endpoint Pool",
		},
		"with_lb": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     true,
			Description: "Create k8s with load balancer if true.",
		},
		"extnet_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			ForceNew:    true,
			Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platform.",
		},
		"desc": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Text description of this instance.",
		},
		"start": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     true,
			Description: "Start k8s cluster.",
		},
		"detailed_info": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: detailedInfoSchemaMake(),
			},
		},
		"master_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "Master group ID.",
		},
		"master_name": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Master group name.",
		},
		"acl": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: aclGroupSchemaMake(),
			},
		},
		"account_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"account_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"bservice_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"created_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"created_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"deleted_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"deleted_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"k8s_ci_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"lb_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"k8s_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"lb_ip": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "IP address of default load balancer.",
		},
		"rg_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"tech_status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"updated_by": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"updated_time": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"kubeconfig": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Kubeconfig for cluster access.",
		},
		"vins_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "ID of default vins for this instance.",
		},
	}
}

func ResourceK8sCP() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		CreateContext: resourceK8sCPCreate,
		ReadContext:   resourceK8sCPRead,
		UpdateContext: resourceK8sCPUpdate,
		DeleteContext: resourceK8sCPDelete,

		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Timeouts: &schema.ResourceTimeout{
			Create:  &constants.Timeout30m,
			Read:    &constants.Timeout600s,
			Update:  &constants.Timeout600s,
			Delete:  &constants.Timeout600s,
			Default: &constants.Timeout600s,
		},

		Schema: resourceK8sCPSchemaMake(),
	}
}
```
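Two things worth noting about the new resource. First, the create path works around the platform requirement that a cluster be born with a worker group: it creates one named "temp" and deletes it as soon as the cluster exists, yielding a control-plane-only cluster. Second, since the resource wires up `schema.ImportStatePassthroughContext`, an existing cluster can presumably be imported with `terraform import decort_k8s_cp.cp <cluster ID>` (resource address as in the sample further below). One oddity to flag: on status `Destroyed`, both Read and Update fall back to `resourceK8sCreate` — the plain `decort_k8s` create — rather than `resourceK8sCPCreate`.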
```diff
@@ -47,9 +47,16 @@ func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m in
 		return diag.FromErr(err)
 	}
 
+	result := computeList
+	if d.Get("ignore_k8s").(bool) {
+		// matches automatically generated names like "s234-g2134-c1" etc
+		result = matchComputes(computeList)
+	}
+
 	id := uuid.New()
 	d.SetId(id.String())
-	d.Set("items", flattenComputeList(computeList))
+	d.Set("items", flattenComputeList(result))
 
 	return nil
 }
```
```diff
@@ -325,7 +332,7 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
 	}
 }
 
-func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
+func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
 	res := map[string]*schema.Schema{
 		"includedeleted": {
 			Type: schema.TypeBool,
```
```diff
@@ -339,7 +346,12 @@ func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
 			Type:     schema.TypeInt,
 			Optional: true,
 		},
+		"ignore_k8s": {
+			Type:        schema.TypeBool,
+			Optional:    true,
+			Default:     false,
+			Description: "If set to true, ignores any VMs associated with any k8s cluster",
+		},
 		"items": {
 			Type:     schema.TypeList,
 			Computed: true,
```
```diff
@@ -362,6 +374,6 @@ func DataSourceComputeList() *schema.Resource {
 			Default: &constants.Timeout60s,
 		},
 
-		Schema: dataSourceCompputeListSchemaMake(),
+		Schema: dataSourceComputeListSchemaMake(),
 	}
 }
```
```diff
@@ -296,7 +296,6 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
 	log.Debugf("flattenCompute: ID %d, RG ID %d", computeRec.ID, computeRec.RGID)
 
 	devices, _ := json.Marshal(computeRec.Devices)
-	userdata, _ := json.Marshal(computeRec.Userdata)
 	bootDisk := findBootDisk(computeRec.Disks)
 
 	//check extraDisks, ipa_type, is,
```
```diff
@@ -307,16 +306,13 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
 	d.Set("affinity_weight", computeRec.AffinityWeight)
 	d.Set("arch", computeRec.Architecture)
 	d.Set("boot_order", computeRec.BootOrder)
-	d.Set("boot_disk_size", computeRec.BootDiskSize)
+	d.Set("boot_disk_size", bootDisk.SizeMax)
 	d.Set("boot_disk", flattenBootDisk(bootDisk))
 	d.Set("boot_disk_id", bootDisk.ID)
 	d.Set("sep_id", bootDisk.SepID)
 	d.Set("pool", bootDisk.Pool)
 	d.Set("clone_reference", computeRec.CloneReference)
 	d.Set("clones", computeRec.Clones)
-	if string(userdata) != "{}" {
-		d.Set("cloud_init", string(userdata))
-	}
 	d.Set("computeci_id", computeRec.ComputeCIID)
 	d.Set("created_by", computeRec.CreatedBy)
 	d.Set("created_time", computeRec.CreatedTime)
```
```diff
@@ -35,6 +35,7 @@ package kvmvm
 import (
 	"context"
 	"strconv"
+	"strings"
 
 	log "github.com/sirupsen/logrus"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
```
```diff
@@ -142,8 +143,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
 	if ok {
 		userdata := argVal.(string)
 		if userdata != "" && userdata != "applied" {
-			createReqPPC.Userdata = userdata
-			createReqX86.Userdata = userdata
+			createReqPPC.Userdata = strings.TrimSpace(userdata)
+			createReqX86.Userdata = strings.TrimSpace(userdata)
 		}
 	}
 
```
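A small, hedged illustration of why the trimming matters: cloud-init payloads written as Terraform heredocs typically end with a newline, and some platforms store or compare the string verbatim, producing spurious diffs or rejected userdata. (The exact platform behaviour is an assumption; the trimming itself is what the diff adds.)

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A heredoc-style cloud-init payload with a trailing newline.
	userdata := "#cloud-config\nusers: []\n"
	fmt.Printf("%q\n", strings.TrimSpace(userdata)) // "#cloud-config\nusers: []"
}
```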
```diff
@@ -592,7 +593,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 			return diag.FromErr(err)
 		}
 	}
-	log.Debugf("resourceComputeUpdate: enable=%t Compute ID %s after completing its resource configuration", d.Id(), enabled)
+	log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled)
 }
 
 // check compute statuses
```
```diff
@@ -702,9 +703,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 
 	if d.HasChange("description") || d.HasChange("name") {
 		req := compute.UpdateRequest{
 			ComputeID: computeRec.ID,
 			Name:      d.Get("name").(string),
-			Description: d.Get("desc").(string),
 		}
 
+		if desc, ok := d.GetOk("desc"); ok {
+			req.Description = desc.(string)
+		}
 
 		if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
```
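Note the trade-off: because `GetOk` skips zero values, an explicit `desc = ""` is never sent, so a description can be changed but not cleared through this path.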
```diff
@@ -1531,7 +1535,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
 		"name": {
 			Type:     schema.TypeString,
 			Required: true,
-			ForceNew: true,
 			Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
 		},
 
```
```diff
@@ -1832,7 +1835,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
 		"started": {
 			Type:     schema.TypeBool,
 			Optional: true,
-			Computed: true,
+			Default:  true,
 			Description: "Is compute started.",
 		},
 		"detach_disks": {
```
```diff
@@ -34,6 +34,7 @@ package kvmvm
 
 import (
 	"context"
+	"regexp"
 	"strconv"
 
 	log "github.com/sirupsen/logrus"
```
```diff
@@ -43,6 +44,16 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
+func matchComputes(computeList compute.ListComputes) compute.ListComputes {
+	matched, _ := regexp.Compile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+")
+	result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
+		res := matched.Match([]byte(ic.Name))
+		return !res
+	})
+
+	return result
+}
+
 func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
 	c := m.(*controller.ControllerCfg)
```
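The filter relies on the platform's generated compute names following a `<letters><digits>-<letters><digits>-<letters><digits>` convention (e.g. the "s234-g2134-c1" example in the data source's comment). A runnable check of the pattern:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern matchComputes compiles above.
	k8sName := regexp.MustCompile(`[a-zA-Z]+\d+-[a-zA-Z]+\d+-[a-zA-Z]+\d+`)

	for _, name := range []string{"s234-g2134-c1", "my-app-vm"} {
		fmt.Printf("%-14s k8s-generated: %v\n", name, k8sName.MatchString(name))
	}
	// s234-g2134-c1  k8s-generated: true
	// my-app-vm      k8s-generated: false
}
```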
```diff
@@ -139,8 +139,8 @@ func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData,
 	c := m.(*controller.ControllerCfg)
 	req := lb.BackendServerDeleteRequest{
 		LBID:        uint64(d.Get("lb_id").(int)),
-		BackendName: d.Get("name").(string),
-		ServerName:  d.Get("backend_name").(string),
+		BackendName: d.Get("backend_name").(string),
+		ServerName:  d.Get("name").(string),
 	}
 
 	_, err = c.CloudAPI().LB().BackendServerDelete(ctx, req)
```
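A straight bug fix: the two fields were swapped, so deletes were issued with the server name as the backend name and vice versa.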
```diff
@@ -578,7 +578,7 @@ func resourceVinsUpdate(ctx context.Context, d *schema.ResourceData, m interface
 		natRule := natRuleInterface.(map[string]interface{})
 		req := vins.NATRuleDelRequest{
 			VINSID: vinsData.ID,
-			RuleID: uint64(natRule["rule_id"].(int)),
+			RuleID: int64(natRule["rule_id"].(int)),
 		}
 
 		_, err := c.CloudAPI().VINS().NATRuleDel(ctx, req)
```
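The `uint64` to `int64` switch matches a signed `RuleID` field in SDK v1.4.7's `NATRuleDelRequest`; presumably the signed type exists to admit a sentinel value, but the diff itself does not say.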
```diff
@@ -36,13 +36,18 @@ data "decort_kvmvm_list" "compute_list" {
   #optional parameter
   #type - number
   #if not set, all available data is returned
   page = 1
 
   #page size
   #optional parameter
   #type - number
   #if not set, all available data is returned
   size = 1
 
+  # Exclude VMs that belong to k8s clusters from the output (when set to true)
+  # optional parameter
+  # bool (default = false)
+  ignore_k8s = true
 }
 
 output "output" {
```
samples/cloudapi/resource_k8s_cp/main.tf (new file, 97 lines)
@@ -0,0 +1,97 @@
```terraform
#Uncomment this block,
#and adjust the version and path as needed,
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
  required_providers {
    decort = {
      source = "basis/decort/decort"
      version = "<VERSION>"
    }
  }
}
*/

provider "decort" {
  authenticator = "oauth2"
  oauth2_url = "https://sso.digitalenergy.online"
  controller_url = "https://mr4.digitalenergy.online"
  app_id = ""
  app_secret = ""
}

resource "decort_k8s_cp" "cp" {
  # Cluster name
  # Required parameter
  # string
  name = "k8s-cp"

  # K8sCI ID
  # Required parameter
  # int
  k8sci_id = 55

  # Network plugin (flannel, weavenet or calico)
  # Required parameter
  # string
  network_plugin = "flannel"

  # Resource group ID
  # Required parameter
  # int
  rg_id = 1387

  # Master node CPU count
  # Optional parameter
  # int
  cpu = 2

  # Master node RAM amount
  # Optional parameter
  # int
  ram = 2048

  # Number of master node VMs (1 or 3)
  # Optional parameter
  # int
  num = 1

  # Master node disk size
  # Optional parameter
  # int
  disk = 10

  # Cluster description
  # Optional parameter
  # string
  desc = "<DESCRIPTION>"

  # External network ID
  # Optional parameter
  # id
  extnet_id = 13

  # Storage Endpoint ID
  # Optional parameter
  # id
  sep_id = 0

  # SEP Pool
  # Optional parameter
  # string
  sep_pool = "pool"

  # Start/stop the cluster
  # Optional parameter
  # bool
  start = true

  # Create the cluster with or without a load balancer
  # Optional parameter
  # bool
  with_lb = true
}

output "cp_out" {
  value = decort_k8s_cp.cp
}
```