parent 371bb0d90f
commit 0e64974821
@@ -1,10 +1,8 @@
## Version 4.1.1

## Version 4.2.0

## Bug Fixes
- Resolved possible context cancellation issues when awaiting platform response
- resource decort_k8s_wg:
  - Added ignore of "workersGroupName" label
- resource decort_kvmvm:
  - Fixed "boot_disk_size" incorrect state
  - Fixed disappearing of "cloud_init" field value
  - Set "started" default value to true

## Features
- Added "decort_k8s_cp" resource (see the usage sketch below):
  - Used to create a k8s cluster and manage its control plane; it therefore carries no info about worker nodes
  - Use decort_k8s_wg alongside decort_k8s_cp to add/delete/manage worker nodes
- Added "ignore_k8s" field to "decort_kvmvm_list" data source:
  - Set to true to list all computes not associated with any k8s cluster
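A minimal usage sketch of the new resource pair. The decort_k8s_cp attributes follow the schema added in this commit; the values and the decort_k8s_wg attribute names are illustrative assumptions, not taken from this diff:

```hcl
# Hypothetical example: a three-master control plane managed by decort_k8s_cp,
# with worker nodes managed by a separate decort_k8s_wg resource.
resource "decort_k8s_cp" "cluster" {
  name           = "demo-cluster"
  rg_id          = 1234        # assumed resource group ID
  k8sci_id       = 55          # assumed k8s catalog item ID
  network_plugin = "flannel"   # one of: flannel, weavenet, calico
  num            = 3           # master node count, 1 or 3
}

resource "decort_k8s_wg" "workers" {
  # attribute names below are assumptions; consult the decort_k8s_wg docs
  k8s_id = decort_k8s_cp.cluster.k8s_id
  name   = "workers"
  num    = 2
}
```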
@@ -0,0 +1,715 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package k8s

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    log "github.com/sirupsen/logrus"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceK8sCPCreate: called with name %s, rg %d", d.Get("name").(string), d.Get("rg_id").(int))

    haveRGID, err := existRGID(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    if !haveRGID {
        return diag.Errorf("resourceK8sCPCreate: can't create k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
    }

    haveK8sciID, err := existK8sCIID(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    if !haveK8sciID {
        return diag.Errorf("resourceK8sCPCreate: can't create k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
    }

    if _, ok := d.GetOk("extnet_id"); ok {
        haveExtNetID, err := existExtNetID(ctx, d, m)
        if err != nil {
            return diag.FromErr(err)
        }

        if !haveExtNetID {
            return diag.Errorf("resourceK8sCPCreate: can't create k8s cluster because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
        }
    }

    c := m.(*controller.ControllerCfg)
    createReq := k8s.CreateRequest{}

    createReq.Name = d.Get("name").(string)
    createReq.RGID = uint64(d.Get("rg_id").(int))
    createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
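    // The create API requires a worker group to be specified, so a temporary
    // placeholder group is requested here; it is deleted again right after the
    // cluster comes up, since workers are managed by decort_k8s_wg.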
    createReq.WorkerGroupName = "temp"
    createReq.NetworkPlugin = d.Get("network_plugin").(string)

    if num, ok := d.GetOk("num"); ok {
        createReq.MasterNum = uint(num.(int))
    } else {
        createReq.MasterNum = 1
    }

    if cpu, ok := d.GetOk("cpu"); ok {
        createReq.MasterCPU = uint(cpu.(int))
    } else {
        createReq.MasterCPU = 2
    }

    if ram, ok := d.GetOk("ram"); ok {
        createReq.MasterRAM = uint(ram.(int))
    } else {
        createReq.MasterRAM = 2048
    }

    if disk, ok := d.GetOk("disk"); ok {
        createReq.MasterDisk = uint(disk.(int))
    } else {
        createReq.MasterDisk = 0
    }

    if sepID, ok := d.GetOk("sep_id"); ok {
        createReq.MasterSEPID = uint64(sepID.(int))
    }

    if sepPool, ok := d.GetOk("sep_pool"); ok {
        createReq.MasterSEPPool = sepPool.(string)
    }

    if withLB, ok := d.GetOk("with_lb"); ok {
        createReq.WithLB = withLB.(bool)
    } else {
        createReq.WithLB = true
    }

    if extNet, ok := d.GetOk("extnet_id"); ok {
        createReq.ExtNetID = uint64(extNet.(int))
    } else {
        createReq.ExtNetID = 0
    }

    if desc, ok := d.GetOk("desc"); ok {
        createReq.Description = desc.(string)
    }

    resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
    if err != nil {
        return diag.FromErr(err)
    }

    taskReq := tasks.GetRequest{
        AuditID: strings.Trim(resp, `"`),
    }

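    // Cluster creation is asynchronous: Create returns a task (audit) ID, so
    // poll the task service every 10 seconds until it completes and yields
    // the new cluster ID.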
    for {
        task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
        if err != nil {
            return diag.FromErr(err)
        }

        log.Debugf("resourceK8sCPCreate: instance creating - %s", task.Stage)

        if task.Completed {
            if task.Error != "" {
                return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
            }

            d.SetId(strconv.Itoa(int(task.Result)))
            break
        }

        time.Sleep(time.Second * 10)
    }

    cluster, err := utilityK8sCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

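    // Drop the temporary worker group created above: this resource tracks the
    // control plane only.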
    delWGReq := k8s.WorkersGroupDeleteRequest{
        K8SID:          cluster.ID,
        WorkersGroupID: cluster.K8SGroups.Workers[0].ID,
    }

    _, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, delWGReq)
    if err != nil {
        return diag.FromErr(err)
    }

    return resourceK8sCPRead(ctx, d, m)
}

func resourceK8sCPRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    cluster, err := utilityK8sCheckPresence(ctx, d, m)
    if err != nil {
        d.SetId("")
        return diag.FromErr(err)
    }

    c := m.(*controller.ControllerCfg)

    hasChanged := false

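    // Reconcile by cluster status: a deleted cluster is restored and
    // re-enabled, a destroyed one is recreated; in-progress states fall
    // through.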
    switch cluster.Status {
    case status.Modeled:
        return diag.Errorf("The k8s cluster is in status: %s, please contact support for more information", cluster.Status)
    case status.Creating:
    case status.Created:
    case status.Deleting:
    case status.Deleted:
        id, _ := strconv.ParseUint(d.Id(), 10, 64)
        restoreReq := k8s.RestoreRequest{
            K8SID: id,
        }

        _, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
        if err != nil {
            return diag.FromErr(err)
        }

        enableReq := k8s.DisableEnableRequest{
            K8SID: id,
        }

        _, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
        if err != nil {
            return diag.FromErr(err)
        }

        hasChanged = true
    case status.Destroying:
        return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
    case status.Destroyed:
        d.SetId("")
        return resourceK8sCPCreate(ctx, d, m)
    case status.Enabling:
    case status.Enabled:
    case status.Disabling:
    case status.Disabled:
        log.Debugf("The k8s cluster is in status: %s, the update may fail. Please enable the cluster first.", cluster.Status)
    case status.Restoring:
    }

    if hasChanged {
        cluster, err = utilityK8sCheckPresence(ctx, d, m)
        if cluster == nil {
            d.SetId("")
            if err != nil {
                return diag.FromErr(err)
            }
            return nil
        }
    }

    if d.Get("start").(bool) {
        if cluster.TechStatus == "STOPPED" {
            req := k8s.StartRequest{
                K8SID: cluster.ID,
            }
            _, err := c.CloudAPI().K8S().Start(ctx, req)
            if err != nil {
                return diag.FromErr(err)
            }
        }
    }

    k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
    if err != nil {
        d.SetId("")
        return diag.FromErr(err)
    }

    curK8s := k8s.ItemK8SCluster{}
    for _, k8sCluster := range k8sList {
        if k8sCluster.ID == cluster.ID {
            curK8s = k8sCluster
        }
    }
    if curK8s.ID == 0 {
        return diag.Errorf("Cluster with id %d not found", cluster.ID)
    }

    d.Set("vins_id", curK8s.VINSID)

    masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
    for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
        comp, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
        if err != nil {
            return diag.FromErr(err)
        }
        masterComputeList = append(masterComputeList, *comp)
    }

    var warnings dc.Warnings

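    // Worker groups present on the platform but not imported into a
    // decort_k8s_wg resource are reported as warnings rather than errors.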
    if _, ok := d.GetOk("k8s_id"); !ok {
        for _, worker := range cluster.K8SGroups.Workers {
            err := fmt.Errorf("found worker group with ID %d; make sure to import it into a decort_k8s_wg resource if you wish to manage it", worker.ID)
            warnings.Add(err)
        }
    }

    flattenResourceK8sCP(d, *cluster, masterComputeList)

    lbGetReq := lb.GetRequest{
        LBID: cluster.LBID,
    }

    lbRec, err := c.CloudAPI().LB().Get(ctx, lbGetReq)
    if err != nil {
        return diag.FromErr(err)
    }

    d.Set("extnet_id", lbRec.ExtNetID)
    d.Set("lb_ip", lbRec.PrimaryNode.FrontendIP)

    kubeconfigReq := k8s.GetConfigRequest{
        K8SID: cluster.ID,
    }

    kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, kubeconfigReq)
    if err != nil {
        log.Warnf("could not get kubeconfig: %v", err)
    }

    d.Set("kubeconfig", kubeconfig)

    return warnings.Get()
}

func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceK8sCPUpdate: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))

    c := m.(*controller.ControllerCfg)

    haveRGID, err := existRGID(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    if !haveRGID {
        return diag.Errorf("resourceK8sCPUpdate: can't update k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
    }

    haveK8sciID, err := existK8sCIID(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    if !haveK8sciID {
        return diag.Errorf("resourceK8sCPUpdate: can't update k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
    }

    cluster, err := utilityK8sCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    hasChanged := false

    switch cluster.Status {
    case status.Modeled:
        return diag.Errorf("The k8s cluster is in status: %s, please contact support for more information", cluster.Status)
    case status.Creating:
    case status.Created:
    case status.Deleting:
    case status.Deleted:
        id, _ := strconv.ParseUint(d.Id(), 10, 64)
        restoreReq := k8s.RestoreRequest{
            K8SID: id,
        }

        _, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
        if err != nil {
            return diag.FromErr(err)
        }

        enableReq := k8s.DisableEnableRequest{
            K8SID: id,
        }

        _, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
        if err != nil {
            return diag.FromErr(err)
        }

        hasChanged = true
    case status.Destroying:
        return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
    case status.Destroyed:
        d.SetId("")
        return resourceK8sCPCreate(ctx, d, m)
    case status.Enabling:
    case status.Enabled:
    case status.Disabling:
    case status.Disabled:
        log.Debugf("The k8s cluster is in status: %s, the update may fail. Please enable the cluster first.", cluster.Status)
    case status.Restoring:
    }

    if hasChanged {
        cluster, err = utilityK8sCheckPresence(ctx, d, m)
        if cluster == nil {
            d.SetId("")
            if err != nil {
                return diag.FromErr(err)
            }
            return nil
        }
    }

    updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
    doBasicUpdate := false

    if d.HasChange("name") {
        updateReq.Name = d.Get("name").(string)
        doBasicUpdate = true
    }

    if d.HasChange("desc") {
        updateReq.Description = d.Get("desc").(string)
        doBasicUpdate = true
    }

    if doBasicUpdate {
        _, err := c.CloudAPI().K8S().Update(ctx, updateReq)
        if err != nil {
            return diag.FromErr(err)
        }
    }

    if d.HasChange("start") {
        if d.Get("start").(bool) {
            if cluster.TechStatus == "STOPPED" {
                req := k8s.StartRequest{
                    K8SID: cluster.ID,
                }
                _, err := c.CloudAPI().K8S().Start(ctx, req)
                if err != nil {
                    return diag.FromErr(err)
                }
            }
        } else {
            if cluster.TechStatus == "STARTED" {
                req := k8s.StopRequest{
                    K8SID: cluster.ID,
                }
                _, err := c.CloudAPI().K8S().Stop(ctx, req)
                if err != nil {
                    return diag.FromErr(err)
                }
            }
        }
    }

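    // Only master scale-down is handled: masters are removed from the tail of
    // DetailedInfo until the requested count is reached.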
    if d.HasChange("num") {
        oldVal, newVal := d.GetChange("num")

        if oldVal.(int) > newVal.(int) {
            ids := make([]string, 0)
            for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
                id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
                ids = append(ids, strconv.FormatUint(id, 10))
            }

            req := k8s.DeleteMasterFromGroupRequest{
                K8SID:         cluster.ID,
                MasterGroupID: cluster.K8SGroups.Masters.ID,
                MasterIDs:     ids,
            }

            _, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)
            if err != nil {
                return diag.FromErr(err)
            }
        }
    }

    return nil
}

func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceK8sCPDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))

    cluster, err := utilityK8sCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    c := m.(*controller.ControllerCfg)
    req := k8s.DeleteRequest{
        K8SID:       cluster.ID,
        Permanently: true,
    }

    _, err = c.CloudAPI().K8S().Delete(ctx, req)
    if err != nil {
        return diag.FromErr(err)
    }

    return nil
}

func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    return map[string]*schema.Schema{
        "name": {
            Type:        schema.TypeString,
            Required:    true,
            Description: "Name of the cluster.",
        },
        "rg_id": {
            Type:        schema.TypeInt,
            Required:    true,
            ForceNew:    true,
            Description: "Resource group ID that this instance belongs to.",
        },
        "k8sci_id": {
            Type:        schema.TypeInt,
            Required:    true,
            ForceNew:    true,
            Description: "ID of the k8s catalog item to base this instance on.",
        },
        "network_plugin": {
            Type:         schema.TypeString,
            Required:     true,
            Description:  "Network plugin to be used.",
            ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
        },
        "num": {
            Type:         schema.TypeInt,
            Optional:     true,
            Computed:     true,
            ValidateFunc: validation.IntInSlice([]int{1, 3}),
            Description:  "Number of master VMs to create. Can be either 1 or 3.",
        },
        "cpu": {
            Type:        schema.TypeInt,
            Optional:    true,
            Computed:    true,
            Description: "Node CPU count.",
        },
        "ram": {
            Type:        schema.TypeInt,
            Optional:    true,
            Computed:    true,
            Description: "Node RAM in MB.",
        },
        "disk": {
            Type:        schema.TypeInt,
            Optional:    true,
            Computed:    true,
            Description: "Node boot disk size in GB.",
        },
        "sep_id": {
            Type:        schema.TypeInt,
            Optional:    true,
            Computed:    true,
            Description: "Storage Endpoint ID.",
        },
        "sep_pool": {
            Type:        schema.TypeString,
            Optional:    true,
            Computed:    true,
            Description: "Storage Endpoint Pool.",
        },
        "with_lb": {
            Type:        schema.TypeBool,
            Optional:    true,
            Default:     true,
            Description: "Create k8s with load balancer if true.",
        },
        "extnet_id": {
            Type:        schema.TypeInt,
            Optional:    true,
            Computed:    true,
            ForceNew:    true,
            Description: "ID of the external network to connect workers to. If omitted, the network will be chosen by the platform.",
        },
        "desc": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "Text description of this instance.",
        },
        "start": {
            Type:        schema.TypeBool,
            Optional:    true,
            Default:     true,
            Description: "Start k8s cluster.",
        },
        "detailed_info": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: detailedInfoSchemaMake(),
            },
        },
        "master_id": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Master group ID.",
        },
        "master_name": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Master group name.",
        },
        "acl": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: aclGroupSchemaMake(),
            },
        },
        "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "account_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "bservice_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "created_by": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "deleted_by": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "k8s_ci_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "lb_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "k8s_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "lb_ip": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "IP address of default load balancer.",
        },
        "rg_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "tech_status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "updated_by": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "updated_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "kubeconfig": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Kubeconfig for cluster access.",
        },
        "vins_id": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "ID of default vins for this instance.",
        },
    }
}

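// ResourceK8sCP returns the Terraform resource definition for decort_k8s_cp.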
func ResourceK8sCP() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        CreateContext: resourceK8sCPCreate,
        ReadContext:   resourceK8sCPRead,
        UpdateContext: resourceK8sCPUpdate,
        DeleteContext: resourceK8sCPDelete,

        Importer: &schema.ResourceImporter{
            StateContext: schema.ImportStatePassthroughContext,
        },

        Timeouts: &schema.ResourceTimeout{
            Create:  &constants.Timeout30m,
            Read:    &constants.Timeout600s,
            Update:  &constants.Timeout600s,
            Delete:  &constants.Timeout600s,
            Default: &constants.Timeout600s,
        },

        Schema: resourceK8sCPSchemaMake(),
    }
}