This commit is contained in:
KasimBaybikov
2023-05-04 10:08:25 +03:00
parent 9bad8a6947
commit 8ca233dd32
288 changed files with 6645 additions and 11464 deletions

View File

@@ -1,56 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
// REST API endpoint paths used by the legacy raw-HTTP DECORT client for
// Kubernetes cluster management. Each constant is the URL path passed to
// controller.DecortAPICall; the action is encoded in the final path segment.
const (
	// Cluster lifecycle.
	K8sCreateAPI      = "/restmachine/cloudapi/k8s/create"
	K8sGetAPI         = "/restmachine/cloudapi/k8s/get"
	K8sUpdateAPI      = "/restmachine/cloudapi/k8s/update"
	K8sDeleteAPI      = "/restmachine/cloudapi/k8s/delete"
	K8sListAPI        = "/restmachine/cloudapi/k8s/list"
	K8sListDeletedAPI = "/restmachine/cloudapi/k8s/listDeleted"
	K8sRestoreAPI     = "/restmachine/cloudapi/k8s/restore"
	K8sEnableAPI      = "/restmachine/cloudapi/k8s/enable"
	// Worker-group and worker-node management.
	K8sWgCreateAPI     = "/restmachine/cloudapi/k8s/workersGroupAdd"
	K8sWgDeleteAPI     = "/restmachine/cloudapi/k8s/workersGroupDelete"
	K8sWorkerAddAPI    = "/restmachine/cloudapi/k8s/workerAdd"
	K8sWorkerDeleteAPI = "/restmachine/cloudapi/k8s/deleteWorkerFromGroup"
	// Kubeconfig retrieval for a cluster.
	K8sGetConfigAPI = "/restmachine/cloudapi/k8s/getConfig"
	// Auxiliary endpoints: load balancer lookup and async task polling
	// used while reading cluster state / waiting for cluster creation.
	LbGetAPI        = "/restmachine/cloudapi/lb/get"
	AsyncTaskGetAPI = "/restmachine/cloudapi/tasks/get"
)

View File

@@ -34,52 +34,53 @@ package k8s
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)
func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
k8s, err := utilityDataK8sCheckPresence(ctx, d, m)
cluster, err := utilityDataK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(k8s.ID, 10))
d.SetId(strconv.FormatUint(cluster.ID, 10))
k8sList, err := utilityK8sListCheckPresence(ctx, d, m, K8sListAPI)
k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
curK8s := K8SItem{}
curK8s := k8s.ItemK8SCluster{}
for _, k8sCluster := range k8sList {
if k8sCluster.ID == k8s.ID {
if k8sCluster.ID == cluster.ID {
curK8s = k8sCluster
}
}
if curK8s.ID == 0 {
return diag.Errorf("Cluster with id %d not found in List clusters", k8s.ID)
return diag.Errorf("Cluster with id %d not found in List clusters", cluster.ID)
}
d.Set("vins_id", curK8s.VINSID)
masterComputeList := make([]kvmvm.ComputeGetResp, 0, len(k8s.K8SGroups.Masters.DetailedInfo))
workersComputeList := make([]kvmvm.ComputeGetResp, 0, len(k8s.K8SGroups.Workers))
for _, masterNode := range k8s.K8SGroups.Masters.DetailedInfo {
masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
workersComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Workers))
for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
if err != nil {
return diag.FromErr(err)
}
masterComputeList = append(masterComputeList, *compute)
}
for _, worker := range k8s.K8SGroups.Workers {
for _, worker := range cluster.K8SGroups.Workers {
for _, info := range worker.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
@@ -90,29 +91,27 @@ func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", d.Id())
kubeconfig, err := c.DecortAPICall(ctx, "POST", K8sGetConfigAPI, urlValues)
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
getConfigReq := k8s.GetConfigRequest{K8SID: k8sId}
kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, getConfigReq)
if err != nil {
log.Warnf("could not get kubeconfig: %v", err)
}
d.Set("kubeconfig", kubeconfig)
urlValues = &url.Values{}
urlValues.Add("lbId", strconv.FormatUint(k8s.LBID, 10))
resp, err := c.DecortAPICall(ctx, "POST", LbGetAPI, urlValues)
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudAPI().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
var lb LbRecord
if err := json.Unmarshal([]byte(resp), &lb); err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
flattenK8sData(d, *k8s, masterComputeList, workersComputeList)
flattenK8sData(d, *cluster, masterComputeList, workersComputeList)
return nil
}

View File

@@ -42,7 +42,7 @@ import (
)
func dataSourceK8sListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
k8sList, err := utilityK8sListCheckPresence(ctx, d, m, K8sListAPI)
k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)

View File

@@ -42,7 +42,7 @@ import (
)
func dataSourceK8sListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
k8sList, err := utilityK8sListCheckPresence(ctx, d, m, K8sListDeletedAPI)
k8sList, err := utilityK8sListDeletedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)

View File

@@ -34,40 +34,14 @@ package k8s
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)
// utilityK8sWgListCheckPresence fetches the k8s cluster identified by the
// "k8s_id" resource attribute and returns its list of worker groups.
// An empty API response is treated as "not present" and yields (nil, nil);
// any transport or decode error is propagated to the caller.
func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (K8SGroupList, error) {
	ctrl := m.(*controller.ControllerCfg)

	params := &url.Values{}
	params.Add("k8sId", strconv.Itoa(d.Get("k8s_id").(int)))

	body, err := ctrl.DecortAPICall(ctx, "POST", K8sGetAPI, params)
	if err != nil {
		return nil, err
	}
	if body == "" {
		// Nothing came back from the API: report absence without an error.
		return nil, nil
	}

	record := K8SRecord{}
	if err := json.Unmarshal([]byte(body), &record); err != nil {
		return nil, err
	}

	return record.K8SGroups.Workers, nil
}
func dataSourceK8sWgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
wgList, err := utilityK8sWgListCheckPresence(ctx, d, m)
if err != nil {
@@ -76,9 +50,9 @@ func dataSourceK8sWgListRead(ctx context.Context, d *schema.ResourceData, m inte
d.SetId(strconv.Itoa(d.Get("k8s_id").(int)))
workersComputeList := make(map[uint64][]kvmvm.ComputeGetResp)
workersComputeList := make(map[uint64][]compute.RecordCompute)
for _, worker := range wgList {
workersComputeList[worker.ID] = make([]kvmvm.ComputeGetResp, 0, len(worker.DetailedInfo))
workersComputeList[worker.ID] = make([]compute.RecordCompute, 0, len(worker.DetailedInfo))
for _, info := range worker.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {

View File

@@ -34,10 +34,11 @@ package k8s
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
)
func flattenAclList(aclList ACLList) []map[string]interface{} {
func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, acl := range aclList {
temp := map[string]interface{}{
@@ -53,7 +54,7 @@ func flattenAclList(aclList ACLList) []map[string]interface{} {
return res
}
func flattenAcl(acl ACLGroup) []map[string]interface{} {
func flattenAcl(acl k8s.RecordACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"account_acl": flattenAclList(acl.AccountACL),
@@ -65,11 +66,11 @@ func flattenAcl(acl ACLGroup) []map[string]interface{} {
return res
}
func flattenInterfaces(interfaces []kvmvm.InterfaceRecord) []map[string]interface{} {
func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, interfaceCompute := range interfaces {
temp := map[string]interface{}{
"def_gw": interfaceCompute.DefaultGW,
"def_gw": interfaceCompute.DefGW,
"ip_address": interfaceCompute.IPAddress,
}
res = append(res, temp)
@@ -78,7 +79,7 @@ func flattenInterfaces(interfaces []kvmvm.InterfaceRecord) []map[string]interfac
return res
}
func flattenDetailedInfo(detailedInfoList DetailedInfoList, computes []kvmvm.ComputeGetResp) []map[string]interface{} {
func flattenDetailedInfo(detailedInfoList k8s.ListDetailedInfo, computes []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
if computes != nil {
for i, detailedInfo := range detailedInfoList {
@@ -88,8 +89,8 @@ func flattenDetailedInfo(detailedInfoList DetailedInfoList, computes []kvmvm.Com
"status": detailedInfo.Status,
"tech_status": detailedInfo.TechStatus,
"interfaces": flattenInterfaces(computes[i].Interfaces),
"natable_vins_ip": computes[i].NatableVinsIP,
"natable_vins_network": computes[i].NatableVinsNet,
"natable_vins_ip": computes[i].NatableVINSIP,
"natable_vins_network": computes[i].NatableVINSNetwork,
}
res = append(res, temp)
}
@@ -108,7 +109,7 @@ func flattenDetailedInfo(detailedInfoList DetailedInfoList, computes []kvmvm.Com
return res
}
func flattenMasterGroup(mastersGroup MasterGroup, masters []kvmvm.ComputeGetResp) []map[string]interface{} {
func flattenMasterGroup(mastersGroup k8s.MasterGroup, masters []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": mastersGroup.CPU,
@@ -124,7 +125,7 @@ func flattenMasterGroup(mastersGroup MasterGroup, masters []kvmvm.ComputeGetResp
return res
}
func flattenK8sGroup(k8SGroupList K8SGroupList, workers []kvmvm.ComputeGetResp) []map[string]interface{} {
func flattenK8sGroup(k8SGroupList k8s.ListK8SGroups, workers []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, k8sGroup := range k8SGroupList {
temp := map[string]interface{}{
@@ -146,7 +147,7 @@ func flattenK8sGroup(k8SGroupList K8SGroupList, workers []kvmvm.ComputeGetResp)
return res
}
func flattenK8sGroups(k8sGroups K8SGroups, masters []kvmvm.ComputeGetResp, workers []kvmvm.ComputeGetResp) []map[string]interface{} {
func flattenK8sGroups(k8sGroups k8s.RecordK8SGroups, masters []compute.RecordCompute, workers []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"masters": flattenMasterGroup(k8sGroups.Masters, masters),
@@ -156,30 +157,30 @@ func flattenK8sGroups(k8sGroups K8SGroups, masters []kvmvm.ComputeGetResp, worke
return res
}
func flattenK8sData(d *schema.ResourceData, k8s K8SRecord, masters []kvmvm.ComputeGetResp, workers []kvmvm.ComputeGetResp) {
d.Set("acl", flattenAcl(k8s.ACL))
d.Set("account_id", k8s.AccountID)
d.Set("account_name", k8s.AccountName)
d.Set("bservice_id", k8s.BServiceID)
d.Set("k8sci_id", k8s.CIID)
d.Set("created_by", k8s.CreatedBy)
d.Set("created_time", k8s.CreatedTime)
d.Set("deleted_by", k8s.DeletedBy)
d.Set("deleted_time", k8s.DeletedTime)
d.Set("k8s_ci_name", k8s.K8CIName)
d.Set("masters", flattenMasterGroup(k8s.K8SGroups.Masters, masters))
d.Set("workers", flattenK8sGroup(k8s.K8SGroups.Workers, workers))
d.Set("lb_id", k8s.LBID)
d.Set("name", k8s.Name)
d.Set("rg_id", k8s.RGID)
d.Set("rg_name", k8s.RGName)
d.Set("status", k8s.Status)
d.Set("tech_status", k8s.TechStatus)
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
func flattenK8sData(d *schema.ResourceData, cluster k8s.RecordK8S, masters []compute.RecordCompute, workers []compute.RecordCompute) {
d.Set("acl", flattenAcl(cluster.ACL))
d.Set("account_id", cluster.AccountID)
d.Set("account_name", cluster.AccountName)
d.Set("bservice_id", cluster.BServiceID)
d.Set("k8sci_id", cluster.CIID)
d.Set("created_by", cluster.CreatedBy)
d.Set("created_time", cluster.CreatedTime)
d.Set("deleted_by", cluster.DeletedBy)
d.Set("deleted_time", cluster.DeletedTime)
d.Set("k8s_ci_name", cluster.K8CIName)
d.Set("masters", flattenMasterGroup(cluster.K8SGroups.Masters, masters))
d.Set("workers", flattenK8sGroup(cluster.K8SGroups.Workers, workers))
d.Set("lb_id", cluster.LBID)
d.Set("name", cluster.Name)
d.Set("rg_id", cluster.RGID)
d.Set("rg_name", cluster.RGName)
d.Set("status", cluster.Status)
d.Set("tech_status", cluster.TechStatus)
d.Set("updated_by", cluster.UpdatedBy)
d.Set("updated_time", cluster.UpdatedTime)
}
func flattenServiceAccount(serviceAccount ServiceAccount) []map[string]interface{} {
func flattenServiceAccount(serviceAccount k8s.RecordServiceAccount) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": serviceAccount.GUID,
@@ -190,7 +191,7 @@ func flattenServiceAccount(serviceAccount ServiceAccount) []map[string]interface
return res
}
func flattenWorkersGroup(workersGroups K8SGroupList) []map[string]interface{} {
func flattenWorkersGroup(workersGroups k8s.ListK8SGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, worker := range workersGroups {
temp := map[string]interface{}{
@@ -211,11 +212,7 @@ func flattenWorkersGroup(workersGroups K8SGroupList) []map[string]interface{} {
return res
}
// flattenConfig casts an untyped config value to its map form.
// The dynamic type of config must already be map[string]interface{};
// any other type panics, preserving the type-assertion contract of the
// original implementation.
func flattenConfig(config interface{}) map[string]interface{} {
	asMap := config.(map[string]interface{})
	return asMap
}
func flattenK8sItems(k8sItems K8SList) []map[string]interface{} {
func flattenK8sItems(k8sItems k8s.ListK8SClusters) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, item := range k8sItems {
temp := map[string]interface{}{
@@ -252,11 +249,11 @@ func flattenK8sItems(k8sItems K8SList) []map[string]interface{} {
return res
}
func flattenK8sList(d *schema.ResourceData, k8sItems K8SList) {
func flattenK8sList(d *schema.ResourceData, k8sItems k8s.ListK8SClusters) {
d.Set("items", flattenK8sItems(k8sItems))
}
func flattenResourceK8s(d *schema.ResourceData, k8s K8SRecord, masters []kvmvm.ComputeGetResp, workers []kvmvm.ComputeGetResp) {
func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute, workers []compute.RecordCompute) {
wg_name := k8s.K8SGroups.Workers[0].Name
d.Set("acl", flattenAcl(k8s.ACL))
@@ -282,9 +279,10 @@ func flattenResourceK8s(d *schema.ResourceData, k8s K8SRecord, masters []kvmvm.C
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
d.Set("default_wg_id", k8s.K8SGroups.Workers[0].ID)
d.Set("network_plugin", k8s.NetworkPlugin)
}
func flattenWg(d *schema.ResourceData, wg K8SGroup, computes []kvmvm.ComputeGetResp) {
func flattenWg(d *schema.ResourceData, wg k8s.ItemK8SGroup, computes []compute.RecordCompute) {
d.Set("annotations", wg.Annotations)
d.Set("cpu", wg.CPU)
d.Set("detailed_info", flattenDetailedInfo(wg.DetailedInfo, computes))
@@ -297,7 +295,7 @@ func flattenWg(d *schema.ResourceData, wg K8SGroup, computes []kvmvm.ComputeGetR
d.Set("taints", wg.Taints)
}
func flattenWgList(wgList K8SGroupList, computesMap map[uint64][]kvmvm.ComputeGetResp) []map[string]interface{} {
func flattenWgList(wgList k8s.ListK8SGroups, computesMap map[uint64][]compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, wg := range wgList {
computes := computesMap[wg.ID]
@@ -320,6 +318,6 @@ func flattenWgList(wgList K8SGroupList, computesMap map[uint64][]kvmvm.ComputeGe
return res
}
func flattenItemsWg(d *schema.ResourceData, wgList K8SGroupList, computes map[uint64][]kvmvm.ComputeGetResp) {
func flattenItemsWg(d *schema.ResourceData, wgList k8s.ListK8SGroups, computes map[uint64][]compute.RecordCompute) {
d.Set("items", flattenWgList(wgList, computes))
}

View File

@@ -87,21 +87,18 @@ func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
"cpu": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node boot disk size in GB.",
},
}

View File

@@ -2,142 +2,68 @@ package k8s
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8ci"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func existK8sID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
k8sID := uint64(d.Get("k8s_id").(int))
req := k8s.ListRequest{}
k8sList := []struct {
ID int `json:"id"`
}{}
k8sListAPI := "/restmachine/cloudapi/k8s/list"
k8sListRaw, err := c.DecortAPICall(ctx, "POST", k8sListAPI, urlValues)
k8sList, err := c.CloudAPI().K8S().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(k8sListRaw), &k8sList)
if err != nil {
return false, err
}
haveK8s := false
k8sID := d.Get("k8s_id").(int)
for _, k8s := range k8sList {
if k8s.ID == k8sID {
haveK8s = true
break
}
}
return haveK8s, nil
return len(k8sList.FilterByID(k8sID)) != 0, nil
}
func existK8sCIID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
k8sciID := uint64(d.Get("k8sci_id").(int))
req := k8ci.ListRequest{}
k8sciList := []struct {
ID int `json:"id"`
}{}
k8sciListAPI := "/restmachine/cloudapi/k8ci/list"
k8sciListRaw, err := c.DecortAPICall(ctx, "POST", k8sciListAPI, urlValues)
k8sciList, err := c.CloudAPI().K8CI().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(k8sciListRaw), &k8sciList)
if err != nil {
return false, err
}
haveK8sCI := false
k8sciID := d.Get("k8sci_id").(int)
for _, k8ci := range k8sciList {
if k8ci.ID == k8sciID {
haveK8sCI = true
break
}
}
return haveK8sCI, nil
return len(k8sciList.FilterByID(k8sciID)) != 0, nil
}
func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
rgID := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{}
rgList := []struct {
ID int `json:"id"`
}{}
rgListAPI := "/restmachine/cloudapi/rg/list"
rgListRaw, err := c.DecortAPICall(ctx, "POST", rgListAPI, urlValues)
rgList, err := c.CloudAPI().RG().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(rgListRaw), &rgList)
if err != nil {
return false, err
}
haveRG := false
rgId := d.Get("rg_id").(int)
for _, rg := range rgList {
if rg.ID == rgId {
haveRG = true
break
}
}
return haveRG, nil
return len(rgList.FilterByID(rgID)) != 0, nil
}
func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
extNetID := d.Get("extnet_id").(int)
extNetID := uint64(d.Get("extnet_id").(int))
if extNetID == 0 {
return true, nil
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
req := extnet.ListRequest{}
extNetList := []struct {
ID int `json:"id"`
}{}
extNetListAPI := "/restmachine/cloudapi/extnet/list"
extNetListRaw, err := c.DecortAPICall(ctx, "POST", extNetListAPI, urlValues)
extNetList, err := c.CloudAPI().ExtNet().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(extNetListRaw), &extNetList)
if err != nil {
return false, err
}
haveExtNet := false
for _, extNet := range extNetList {
if extNet.ID == extNetID {
haveExtNet = true
break
}
}
return haveExtNet, nil
return len(extNetList.FilterByID(extNetID)) != 0, nil
}

View File

@@ -34,19 +34,21 @@ package k8s
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
@@ -83,11 +85,13 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
urlValues.Add("k8ciId", strconv.Itoa(d.Get("k8sci_id").(int)))
urlValues.Add("workerGroupName", d.Get("wg_name").(string))
createReq := k8s.CreateRequest{}
createReq.Name = d.Get("name").(string)
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
createReq.WorkerGroupName = d.Get("wg_name").(string)
createReq.NetworkPlugin = d.Get("network_plugin").(string)
var masterNode K8sNodeRecord
if masters, ok := d.GetOk("masters"); ok {
@@ -95,12 +99,12 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
} else {
masterNode = nodeMasterDefault()
}
urlValues.Add("masterNum", strconv.Itoa(masterNode.Num))
urlValues.Add("masterCpu", strconv.Itoa(masterNode.Cpu))
urlValues.Add("masterRam", strconv.Itoa(masterNode.Ram))
urlValues.Add("masterDisk", strconv.Itoa(masterNode.Disk))
urlValues.Add("masterSepId", strconv.Itoa(masterNode.SepID))
urlValues.Add("masterSepPool", masterNode.SepPool)
createReq.MasterNum = uint(masterNode.Num)
createReq.MasterCPU = uint(masterNode.Cpu)
createReq.MasterRAM = uint(masterNode.Ram)
createReq.MasterDisk = uint(masterNode.Disk)
createReq.MasterSEPID = uint64(masterNode.SepID)
createReq.MasterSEPPool = masterNode.SepPool
var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok {
@@ -108,68 +112,66 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
} else {
workerNode = nodeWorkerDefault()
}
urlValues.Add("workerNum", strconv.Itoa(workerNode.Num))
urlValues.Add("workerCpu", strconv.Itoa(workerNode.Cpu))
urlValues.Add("workerRam", strconv.Itoa(workerNode.Ram))
urlValues.Add("workerDisk", strconv.Itoa(workerNode.Disk))
urlValues.Add("workerSepId", strconv.Itoa(workerNode.SepID))
urlValues.Add("workerSepPool", workerNode.SepPool)
createReq.WorkerNum = uint(workerNode.Num)
createReq.WorkerCPU = uint(workerNode.Cpu)
createReq.WorkerRAM = uint(workerNode.Ram)
createReq.WorkerDisk = uint(workerNode.Disk)
createReq.WorkerSEPID = uint64(workerNode.SepID)
createReq.WorkerSEPPool = workerNode.SepPool
if labels, ok := d.GetOk("labels"); ok {
labels := labels.([]interface{})
for _, label := range labels {
urlValues.Add("labels", label.(string))
createReq.Labels = append(createReq.Labels, label.(string))
}
}
if taints, ok := d.GetOk("taints"); ok {
taints := taints.([]interface{})
for _, taint := range taints {
urlValues.Add("taints", taint.(string))
createReq.Taints = append(createReq.Taints, taint.(string))
}
}
if annotations, ok := d.GetOk("annotations"); ok {
annotations := annotations.([]interface{})
for _, annotation := range annotations {
urlValues.Add("annotations", annotation.(string))
createReq.Annotations = append(createReq.Annotations, annotation.(string))
}
}
if withLB, ok := d.GetOk("with_lb"); ok {
urlValues.Add("withLB", strconv.FormatBool(withLB.(bool)))
createReq.WithLB = withLB.(bool)
} else {
urlValues.Add("withLB", strconv.FormatBool(true))
createReq.WithLB = true
}
if extNet, ok := d.GetOk("extnet_id"); ok {
urlValues.Add("extnetId", strconv.Itoa(extNet.(int)))
createReq.ExtNetID = uint64(extNet.(int))
} else {
urlValues.Add("extnetId", "0")
createReq.ExtNetID = 0
}
if desc, ok := d.GetOk("desc"); ok {
urlValues.Add("desc", desc.(string))
createReq.Description = desc.(string)
}
resp, err := c.DecortAPICall(ctx, "POST", K8sCreateAPI, urlValues)
resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
urlValues.Add("auditId", strings.Trim(resp, `"`))
taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
for {
resp, err := c.DecortAPICall(ctx, "POST", AsyncTaskGetAPI, urlValues)
task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
if err != nil {
return diag.FromErr(err)
}
task := AsyncTask{}
if err := json.Unmarshal([]byte(resp), &task); err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceK8sCreate: instance creating - %s", task.Stage)
if task.Completed {
@@ -188,7 +190,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
k8s, err := utilityK8sCheckPresence(ctx, d, m)
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
@@ -198,29 +200,35 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
hasChanged := false
switch k8s.Status {
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", k8s.Status)
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
urlVal := &url.Values{}
urlVal.Add("k8sId", d.Id())
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.DecortAPICall(ctx, "POST", K8sRestoreAPI, urlVal)
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
_, err = c.DecortAPICall(ctx, "POST", K8sEnableAPI, urlVal)
enableReq := k8s.DisabelEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", k8s.Status)
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCreate(ctx, d, m)
@@ -228,13 +236,13 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", k8s.Status)
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
k8s, err = utilityK8sCheckPresence(ctx, d, m)
if k8s == nil {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
@@ -243,32 +251,45 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
}
}
k8sList, err := utilityK8sListCheckPresence(ctx, d, m, K8sListAPI)
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
curK8s := K8SItem{}
curK8s := k8s.ItemK8SCluster{}
for _, k8sCluster := range k8sList {
if k8sCluster.ID == k8s.ID {
if k8sCluster.ID == cluster.ID {
curK8s = k8sCluster
}
}
if curK8s.ID == 0 {
return diag.Errorf("Cluster with id %d not found", k8s.ID)
return diag.Errorf("Cluster with id %d not found", cluster.ID)
}
d.Set("vins_id", curK8s.VINSID)
masterComputeList := make([]kvmvm.ComputeGetResp, 0, len(k8s.K8SGroups.Masters.DetailedInfo))
workersComputeList := make([]kvmvm.ComputeGetResp, 0, len(k8s.K8SGroups.Workers))
for _, masterNode := range k8s.K8SGroups.Masters.DetailedInfo {
masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
workersComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Workers))
for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
if err != nil {
return diag.FromErr(err)
}
masterComputeList = append(masterComputeList, *compute)
}
for _, worker := range k8s.K8SGroups.Workers {
for _, worker := range cluster.K8SGroups.Workers {
for _, info := range worker.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
@@ -278,29 +299,29 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
}
}
flattenResourceK8s(d, *k8s, masterComputeList, workersComputeList)
flattenResourceK8s(d, *cluster, masterComputeList, workersComputeList)
urlValues := &url.Values{}
urlValues.Add("lbId", strconv.FormatUint(k8s.LBID, 10))
lbGetReq := lb.GetRequest{
LBID: cluster.LBID,
}
resp, err := c.DecortAPICall(ctx, "POST", LbGetAPI, urlValues)
lb, err := c.CloudAPI().LB().Get(ctx, lbGetReq)
if err != nil {
return diag.FromErr(err)
}
var lb LbRecord
if err := json.Unmarshal([]byte(resp), &lb); err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
urlValues = &url.Values{}
urlValues.Add("k8sId", d.Id())
kubeconfig, err := c.DecortAPICall(ctx, "POST", K8sGetConfigAPI, urlValues)
kubeconfigReq := k8s.GetConfigRequest{
K8SID: cluster.ID,
}
kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, kubeconfigReq)
if err != nil {
log.Warnf("could not get kubeconfig: %v", err)
}
d.Set("kubeconfig", kubeconfig)
return nil
@@ -329,36 +350,42 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
}
k8s, err := utilityK8sCheckPresence(ctx, d, m)
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
hasChanged := false
switch k8s.Status {
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", k8s.Status)
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
urlVal := &url.Values{}
urlVal.Add("k8sId", d.Id())
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.DecortAPICall(ctx, "POST", K8sRestoreAPI, urlVal)
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
_, err = c.DecortAPICall(ctx, "POST", K8sEnableAPI, urlVal)
enableReq := k8s.DisabelEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", k8s.Status)
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCreate(ctx, d, m)
@@ -366,13 +393,13 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", k8s.Status)
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
k8s, err = utilityK8sCheckPresence(ctx, d, m)
if k8s == nil {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
@@ -382,33 +409,64 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
if d.HasChange("name") {
urlValues := &url.Values{}
urlValues.Add("k8sId", d.Id())
urlValues.Add("name", d.Get("name").(string))
req := k8s.UpdateRequest{
K8SID: cluster.ID,
Name: d.Get("name").(string),
}
_, err := c.DecortAPICall(ctx, "POST", K8sUpdateAPI, urlValues)
_, err := c.CloudAPI().K8S().Update(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("workers") {
wg := k8s.K8SGroups.Workers[0]
urlValues := &url.Values{}
urlValues.Add("k8sId", d.Id())
urlValues.Add("workersGroupId", strconv.FormatUint(wg.ID, 10))
wg := cluster.K8SGroups.Workers[0]
newWorkers := parseNode(d.Get("workers").([]interface{}))
if uint64(newWorkers.Num) > wg.Num {
urlValues.Add("num", strconv.FormatUint(uint64(newWorkers.Num)-wg.Num, 10))
if _, err := c.DecortAPICall(ctx, "POST", K8sWorkerAddAPI, urlValues); err != nil {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
Num: uint64(newWorkers.Num - int(wg.Num)),
}
if _, err := c.CloudAPI().K8S().WorkerAdd(ctx, req); err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newWorkers.Num; i-- {
urlValues.Set("workerId", strconv.FormatUint(wg.DetailedInfo[i].ID, 10))
if _, err := c.DecortAPICall(ctx, "POST", K8sWorkerDeleteAPI, urlValues); err != nil {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
WorkerID: wg.DetailedInfo[i].ID,
}
if _, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req); err != nil {
return diag.FromErr(err)
}
}
}
}
if d.HasChange("start") {
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
} else {
if cluster.TechStatus == "STARTED" {
req := k8s.StopRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Stop(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
@@ -421,20 +479,18 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
func resourceK8sDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
k8s, err := utilityK8sCheckPresence(ctx, d, m)
if k8s == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", d.Id())
urlValues.Add("permanently", "true")
req := k8s.DeleteRequest{
K8SID: cluster.ID,
Permanently: true,
}
_, err = c.DecortAPICall(ctx, "POST", K8sDeleteAPI, urlValues)
_, err = c.CloudAPI().K8S().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@@ -466,6 +522,12 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Name for first worker group created with cluster.",
},
"network_plugin": {
Type: schema.TypeString,
Required: true,
Description: "Network plugin to be used",
ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
},
"labels": {
Type: schema.TypeList,
Optional: true,
@@ -526,6 +588,12 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Text description of this instance.",
},
"start": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Start k8s cluster",
},
"acl": {
Type: schema.TypeList,

View File

@@ -34,16 +34,16 @@ package k8s
import (
"context"
"net/url"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)
func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -59,54 +59,26 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", strconv.Itoa(d.Get("k8s_id").(int)))
urlValues.Add("name", d.Get("name").(string))
urlValues.Add("workerNum", strconv.Itoa(d.Get("num").(int)))
urlValues.Add("workerCpu", strconv.Itoa(d.Get("cpu").(int)))
urlValues.Add("workerRam", strconv.Itoa(d.Get("ram").(int)))
if d.Get("disk") == nil {
urlValues.Add("workerDisk", strconv.Itoa(0))
} else {
urlValues.Add("workerDisk", strconv.Itoa(d.Get("disk").(int)))
req := k8s.WorkersGroupAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
}
resp, err := c.DecortAPICall(ctx, "POST", K8sWgCreateAPI, urlValues)
if d.Get("disk") == nil {
req.WorkerDisk = 0
} else {
req.WorkerDisk = uint64(d.Get("disk").(int))
}
resp, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(resp)
// This code is the supposed flow, but at the time of writing it's not yet implemented by the platfom
//urlValues = &url.Values{}
//urlValues.Add("auditId", strings.Trim(resp, `"`))
//for {
//resp, err := controller.decortAPICall("POST", AsyncTaskGetAPI, urlValues)
//if err != nil {
//return err
//}
//task := AsyncTask{}
//if err := json.Unmarshal([]byte(resp), &task); err != nil {
//return err
//}
//log.Debugf("resourceK8sCreate: workers group creating - %s", task.Stage)
//if task.Completed {
//if task.Error != "" {
//return fmt.Errorf("cannot create workers group: %v", task.Error)
//}
//d.SetId(strconv.Itoa(int(task.Result)))
//break
//}
//time.Sleep(time.Second * 5)
//}
d.SetId(strconv.FormatUint(resp, 10))
return resourceK8sWgRead(ctx, d, m)
}
@@ -119,7 +91,7 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
return diag.FromErr(err)
}
workersComputeList := make([]kvmvm.ComputeGetResp, 0, 0)
workersComputeList := make([]compute.RecordCompute, 0, 0)
for _, info := range wg.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
@@ -165,20 +137,27 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err)
}
urlValues := &url.Values{}
urlValues.Add("k8sId", strconv.Itoa(d.Get("k8s_id").(int)))
urlValues.Add("workersGroupId", d.Id())
wgId, _ := strconv.ParseUint(d.Id(), 10, 64)
if newNum := d.Get("num").(int); uint64(newNum) > wg.Num {
urlValues.Add("num", strconv.FormatUint(uint64(newNum)-wg.Num, 10))
_, err := c.DecortAPICall(ctx, "POST", K8sWorkerAddAPI, urlValues)
req := k8s.WorkerAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
Num: uint64(newNum) - wg.Num,
}
_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newNum; i-- {
urlValues.Set("workerId", strconv.FormatUint(wg.DetailedInfo[i].ID, 10))
_, err := c.DecortAPICall(ctx, "POST", K8sWorkerDeleteAPI, urlValues)
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkerID: wg.DetailedInfo[i].ID,
}
_, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@@ -192,19 +171,17 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
log.Debugf("resourceK8sWgDelete: called with k8s id %d", d.Get("k8s_id").(int))
wg, err := utilityK8sWgCheckPresence(ctx, d, m)
if wg == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", strconv.Itoa(d.Get("k8s_id").(int)))
urlValues.Add("workersGroupId", strconv.FormatUint(wg.ID, 10))
req := k8s.WorkersGroupDeleteRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
}
_, err = c.DecortAPICall(ctx, "POST", K8sWgDeleteAPI, urlValues)
_, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}

View File

@@ -34,50 +34,37 @@ package k8s
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*K8SRecord, error) {
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", d.Id())
resp, err := c.DecortAPICall(ctx, "POST", K8sGetAPI, urlValues)
k8sID, _ := strconv.ParseUint(d.Id(), 10, 64)
req := k8s.GetRequest{
K8SID: k8sID,
}
k8s, err := c.CloudAPI().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
if resp == "" {
return nil, nil
}
k8s := K8SRecord{}
if err := json.Unmarshal([]byte(resp), &k8s); err != nil {
return nil, err
}
return &k8s, nil
return k8s, nil
}
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, computeID uint64) (*kvmvm.ComputeGetResp, error) {
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, computeID uint64) (*compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("computeId", strconv.FormatUint(computeID, 10))
computeRaw, err := c.DecortAPICall(ctx, "POST", kvmvm.ComputeGetAPI, urlValues)
if err != nil {
return nil, err
req := compute.GetRequest{
ComputeID: computeID,
}
compute := &kvmvm.ComputeGetResp{}
err = json.Unmarshal([]byte(computeRaw), compute)
compute, err := c.CloudAPI().Compute().Get(ctx, req)
if err != nil {
return nil, err
}
@@ -85,41 +72,43 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
return compute, nil
}
func utilityDataK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*K8SRecord, error) {
func utilityDataK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("k8sId", strconv.Itoa(d.Get("k8s_id").(int)))
k8sRaw, err := c.DecortAPICall(ctx, "POST", K8sGetAPI, urlValues)
req := k8s.GetRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
}
k8s, err := c.CloudAPI().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
k8s := &K8SRecord{}
err = json.Unmarshal([]byte(k8sRaw), k8s)
if err != nil {
return nil, err
}
return k8s, nil
}
func utilityK8sListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, api string) (K8SList, error) {
func utilityK8sListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (k8s.ListK8SClusters, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("includedeleted", "false")
urlValues.Add("page", "0")
urlValues.Add("size", "0")
req := k8s.ListRequest{
IncludeDeleted: false,
}
k8sListRaw, err := c.DecortAPICall(ctx, "POST", api, urlValues)
k8sList, err := c.CloudAPI().K8S().List(ctx, req)
if err != nil {
return nil, err
}
return k8sList, nil
}
func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (k8s.ListK8SClusters, error) {
c := m.(*controller.ControllerCfg)
req := k8s.ListDeletedRequest{}
k8sList, err := c.CloudAPI().K8S().ListDeleted(ctx, req)
if err != nil {
return nil, err
}
k8sList := K8SList{}
err = json.Unmarshal([]byte(k8sListRaw), &k8sList)
if err != nil {
return nil, err
}
return k8sList, nil
}

View File

@@ -34,40 +34,34 @@ package k8s
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
)
func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*K8SGroup, []kvmvm.ComputeGetResp, error) {
func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ItemK8SGroup, []compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
k8sId := d.Get("k8s_id").(int)
wgId := d.Get("wg_id").(int)
k8sId := uint64(d.Get("k8s_id").(int))
wgId := uint64(d.Get("wg_id").(int))
urlValues.Add("k8sId", strconv.Itoa(k8sId))
k8sGetReq := k8s.GetRequest{
K8SID: k8sId,
}
k8sRaw, err := c.DecortAPICall(ctx, "POST", K8sGetAPI, urlValues)
cluster, err := c.CloudAPI().K8S().Get(ctx, k8sGetReq)
if err != nil {
return nil, nil, err
}
k8s := K8SRecord{}
err = json.Unmarshal([]byte(k8sRaw), &k8s)
if err != nil {
return nil, nil, err
}
curWg := K8SGroup{}
for _, wg := range k8s.K8SGroups.Workers {
if wg.ID == uint64(wgId) {
curWg := k8s.ItemK8SGroup{}
for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == wgId {
curWg = wg
break
}
@@ -76,7 +70,7 @@ func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData,
return nil, nil, fmt.Errorf("WG with id %v in k8s cluster %v not found", wgId, k8sId)
}
workersComputeList := make([]kvmvm.ComputeGetResp, 0, 0)
workersComputeList := make([]compute.RecordCompute, 0, 0)
for _, info := range curWg.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
@@ -89,9 +83,8 @@ func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData,
return &curWg, workersComputeList, nil
}
func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*K8SGroup, error) {
func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ItemK8SGroup, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
var wgId int
var k8sId int
var err error
@@ -113,27 +106,34 @@ func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m in
k8sId = d.Get("k8s_id").(int)
}
urlValues.Add("k8sId", strconv.Itoa(k8sId))
req := k8s.GetRequest{
K8SID: uint64(k8sId),
}
resp, err := c.DecortAPICall(ctx, "POST", K8sGetAPI, urlValues)
cluster, err := c.CloudAPI().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
if resp == "" {
return nil, err
}
var k8s K8SRecord
if err := json.Unmarshal([]byte(resp), &k8s); err != nil {
return nil, err
}
for _, wg := range k8s.K8SGroups.Workers {
for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == uint64(wgId) {
return &wg, nil
}
}
return nil, fmt.Errorf("Not found wg with id: %v in k8s cluster: %v", wgId, k8s.ID)
return nil, fmt.Errorf("Not found wg with id: %v in k8s cluster: %v", wgId, cluster.ID)
}
func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (k8s.ListK8SGroups, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
}
cluster, err := c.CloudAPI().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
return cluster.K8SGroups.Workers, nil
}