This commit is contained in:
2023-05-23 16:48:16 +03:00
parent 523d96189f
commit 9cf150437d
11 changed files with 554 additions and 137 deletions

View File

@@ -0,0 +1,123 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
// dataSourceK8sComputesRead is the ReadContext handler of the k8s computes
// data source. It resolves the cluster, flattens its master/worker compute
// lists into state and records the cluster ID as the resource ID.
func dataSourceK8sComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	cluster, err := utilityK8sCheckPresence(ctx, d, m)
	if err != nil {
		// Drop the ID so Terraform does not keep a stale resource around.
		d.SetId("")
		return diag.FromErr(err)
	}

	flattenK8sDataComputes(d, cluster)
	d.SetId(fmt.Sprint(cluster.ID))

	return nil
}
// computesSchemaMake describes a single compute (VM) entry of a k8s cluster
// as exposed by the computes data source; every attribute is read-only.
func computesSchemaMake() map[string]*schema.Schema {
	computed := func(t schema.ValueType) *schema.Schema {
		return &schema.Schema{Type: t, Computed: true}
	}

	return map[string]*schema.Schema{
		"id":          computed(schema.TypeInt),
		"name":        computed(schema.TypeString),
		"group_name":  computed(schema.TypeString),
		"status":      computed(schema.TypeString),
		"tech_status": computed(schema.TypeString),
	}
}
// workerComputesSchemaMake is currently a placeholder: worker computes expose
// no attributes of their own and an empty schema map is returned.
func workerComputesSchemaMake() map[string]*schema.Schema {
	return make(map[string]*schema.Schema)
}
// dataSourceK8sComputesSchemaMake builds the top-level schema of the k8s
// computes data source: the required cluster ID plus two computed lists
// (masters and workers) that share the per-compute schema.
func dataSourceK8sComputesSchemaMake() map[string]*schema.Schema {
	// Each call yields a distinct schema value, mirroring the original
	// separate literals for "masters" and "workers".
	computesList := func() *schema.Schema {
		return &schema.Schema{
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: computesSchemaMake(),
			},
		}
	}

	return map[string]*schema.Schema{
		"k8s_id": {
			Type:     schema.TypeInt,
			Required: true,
		},
		"masters": computesList(),
		"workers": computesList(),
	}
}
// DataSourceK8sComputes assembles the schema.Resource for the k8s computes
// data source (read-only, 60 second timeouts).
func DataSourceK8sComputes() *schema.Resource {
	timeouts := &schema.ResourceTimeout{
		Read:    &constants.Timeout60s,
		Default: &constants.Timeout60s,
	}

	return &schema.Resource{
		SchemaVersion: 1,
		ReadContext:   dataSourceK8sComputesRead,
		Timeouts:      timeouts,
		Schema:        dataSourceK8sComputesSchemaMake(),
	}
}

View File

@@ -38,6 +38,46 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
)
// flattenK8sDataComputes writes the cluster ID and its master/worker compute
// lists into the data-source state. d.Set errors are deliberately ignored,
// matching the provider's other flatten helpers.
func flattenK8sDataComputes(d *schema.ResourceData, cluster *k8s.RecordK8S) {
	fields := []struct {
		key   string
		value interface{}
	}{
		{"k8s_id", cluster.ID},
		{"masters", flattenMasterComputes(cluster)},
		{"workers", flattenWorkerComputes(cluster)},
	}
	for _, f := range fields {
		d.Set(f.key, f.value)
	}
}
// flattenMasterComputes converts the cluster's master nodes into the flat
// map form described by computesSchemaMake. All masters belong to the single
// master group, so group_name is taken from the group itself.
func flattenMasterComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
	masters := cluster.K8SGroups.Masters
	// Pre-size: exactly one entry per detailed-info record.
	res := make([]map[string]interface{}, 0, len(masters.DetailedInfo))
	for _, comp := range masters.DetailedInfo {
		res = append(res, map[string]interface{}{
			"id":          comp.ID,
			"name":        comp.Name,
			"status":      comp.Status,
			"tech_status": comp.TechStatus,
			"group_name":  masters.Name,
		})
	}
	return res
}
// flattenWorkerComputes converts every node of every worker group into the
// flat map form described by computesSchemaMake; group_name identifies which
// worker group each compute belongs to.
func flattenWorkerComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
	// Pre-size: count all workers across the groups up front.
	total := 0
	for _, wg := range cluster.K8SGroups.Workers {
		total += len(wg.DetailedInfo)
	}
	res := make([]map[string]interface{}, 0, total)
	for _, wg := range cluster.K8SGroups.Workers {
		for _, comp := range wg.DetailedInfo {
			res = append(res, map[string]interface{}{
				"id":          comp.ID,
				"name":        comp.Name,
				"status":      comp.Status,
				"tech_status": comp.TechStatus,
				"group_name":  wg.Name,
			})
		}
	}
	return res
}
func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, acl := range aclList {

View File

@@ -39,12 +39,16 @@ import (
)
type K8sNodeRecord struct {
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
// coming in future updates (curr. version 4.0.2)
// Labels []interface{} `json:"labels"`
// Annotations []interface{} `json:"annotations"`
// Taints []interface{} `json:"taints"`
DetailedInfo []struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -53,7 +57,7 @@ type K8sNodeRecord struct {
SepPool string `json:"SepPool"`
}
//K8sRecord represents k8s instance
// K8sRecord represents k8s instance
type K8sRecord struct {
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
@@ -72,7 +76,7 @@ type K8sRecord struct {
type K8sRecordList []K8sRecord
//LbRecord represents load balancer instance
// LbRecord represents load balancer instance
type LbRecord struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -87,7 +91,7 @@ type LbRecord struct {
} `json:"primaryNode"`
}
//Blasphemous workaround for parsing Result value
// Blasphemous workaround for parsing Result value
type TaskResult int
func (r *TaskResult) UnmarshalJSON(b []byte) error {
@@ -117,7 +121,7 @@ func (r *TaskResult) UnmarshalJSON(b []byte) error {
return nil
}
//AsyncTask represents a long task completion status
// AsyncTask represents a long task completion status
type AsyncTask struct {
AuditID string `json:"auditId"`
Completed bool `json:"completed"`
@@ -136,7 +140,7 @@ type SshKeyConfig struct {
UserShell string
}
//FromSDK
// FromSDK
type K8SGroup struct {
Annotations []string `json:"annotations"`
CPU uint64 `json:"cpu"`

View File

@@ -52,7 +52,7 @@ func nodeWorkerDefault() K8sNodeRecord {
}
}
func parseNode(nodeList []interface{}) K8sNodeRecord {
func parseDefaultNode(nodeList []interface{}) K8sNodeRecord {
node := nodeList[0].(map[string]interface{})
return K8sNodeRecord{
@@ -65,45 +65,6 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
}
}
// nodeToResource wraps the sizing fields of a node record in the single-item
// list form Terraform expects for a nested block.
func nodeToResource(node K8sNodeRecord) []interface{} {
	return []interface{}{
		map[string]interface{}{
			"num":  node.Num,
			"cpu":  node.Cpu,
			"ram":  node.Ram,
			"disk": node.Disk,
		},
	}
}
// nodeK8sSubresourceSchemaMake describes the sizing sub-resource (node count,
// CPU, RAM, boot disk) shared by master and worker node blocks.
func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
	required := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeInt,
			Required:    true,
			Description: desc,
		}
	}

	return map[string]*schema.Schema{
		"num":  required("Number of nodes to create."),
		"cpu":  required("Node CPU count."),
		"ram":  required("Node RAM in MB."),
		"disk": required("Node boot disk size in GB."),
	}
}
func mastersSchemaMake() map[string]*schema.Schema {
masters := masterGroupSchemaMake()
masters["num"] = &schema.Schema{
@@ -137,41 +98,78 @@ func mastersSchemaMake() map[string]*schema.Schema {
ForceNew: true,
Description: "Node boot disk size in GB.",
}
return masters
}
func workersSchemaMake() map[string]*schema.Schema {
workers := k8sGroupListSchemaMake()
workers["num"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"num": {
Type: schema.TypeInt,
Required: true,
},
"ram": {
Type: schema.TypeInt,
Required: true,
},
"cpu": {
Type: schema.TypeInt,
Required: true,
},
"disk": {
Type: schema.TypeInt,
Required: true,
},
"annotations": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"detailed_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: detailedInfoSchemaMake(),
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"labels": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
},
"sep_pool": {
Type: schema.TypeString,
Optional: true,
},
}
workers["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
workers["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
workers["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node CPU count.",
}
workers["ram"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node RAM in MB.",
}
workers["disk"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node boot disk size in GB.",
}
return workers
}

View File

@@ -95,7 +95,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var masterNode K8sNodeRecord
if masters, ok := d.GetOk("masters"); ok {
masterNode = parseNode(masters.([]interface{}))
masterNode = parseDefaultNode(masters.([]interface{}))
} else {
masterNode = nodeMasterDefault()
}
@@ -108,7 +108,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok {
workerNode = parseNode(workers.([]interface{}))
workerNode = parseDefaultNode(workers.([]interface{}))
} else {
workerNode = nodeWorkerDefault()
}
@@ -421,31 +421,9 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
if d.HasChange("workers") {
wg := cluster.K8SGroups.Workers[0]
newWorkers := parseNode(d.Get("workers").([]interface{}))
if uint64(newWorkers.Num) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
Num: uint64(newWorkers.Num - int(wg.Num)),
}
if _, err := c.CloudAPI().K8S().WorkerAdd(ctx, req); err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newWorkers.Num; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
WorkerID: wg.DetailedInfo[i].ID,
}
if _, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req); err != nil {
return diag.FromErr(err)
}
}
err := handleWorkersChange(ctx, d, c, cluster)
if err != nil {
return diag.FromErr(err)
}
}
@@ -564,7 +542,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: workersSchemaMake(),
},
@@ -696,10 +673,10 @@ func ResourceK8s() *schema.Resource {
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout30m,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
Read: &constants.Timeout600s,
Update: &constants.Timeout600s,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceK8sSchemaMake(),

View File

@@ -42,10 +42,143 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
// handleWorkersChange reconciles the "workers" blocks of the resource with the
// cluster: when the number of groups changed, groups present on only one side
// are deleted or added; afterwards the node count of surviving groups is
// adjusted via updateNum.
func handleWorkersChange(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, cluster *k8s.RecordK8S) error {
	oldRaw, newRaw := d.GetChange("workers")
	oldSet, _ := oldRaw.([]interface{})
	newSet, _ := newRaw.([]interface{})

	switch {
	case len(oldSet) > len(newSet):
		// NOTE(review): workersDifference is symmetric, so the result may also
		// include freshly added groups — confirm this matches the intended
		// delete semantics.
		if err := deleteWGs(ctx, c, cluster, workersDifference(oldSet, newSet)); err != nil {
			return err
		}
	case len(oldSet) < len(newSet):
		if err := addWGs(ctx, c, cluster, workersDifference(oldSet, newSet)); err != nil {
			return err
		}
	}

	return updateNum(ctx, c, cluster, oldSet, newSet)
}
// updateNum aligns the node count of every worker group that appears in both
// the old and the new "workers" state. Groups are matched by their "id"
// attribute; missing nodes are added with one WorkerAdd call, surplus nodes
// are deleted individually, highest detailed_info index first.
func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, old_set []interface{}, new_set []interface{}) error {
	for _, valOld := range old_set {
		wgOld, _ := valOld.(map[string]interface{})
		for _, valNew := range new_set {
			wgNew, _ := valNew.(map[string]interface{})
			if wgOld["id"] == wgNew["id"] {
				oldNum := wgOld["num"].(int)
				newNum := wgNew["num"].(int)
				if oldNum < newNum {
					// Scale up: request the whole shortfall in a single call.
					req := k8s.WorkerAddRequest{
						K8SID:          cluster.ID,
						WorkersGroupID: uint64(wgNew["id"].(int)),
						Num:            uint64(newNum - oldNum),
					}
					_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
					if err != nil {
						return err
					}
				} else if oldNum > newNum {
					// Scale down: remove workers from the tail of the old
					// state's detailed_info list.
					for i := oldNum - 1; i >= newNum; i-- {
						detailedInfo := wgOld["detailed_info"].([]interface{})
						if len(detailedInfo) == 0 {
							// NOTE(review): this returns success for the whole
							// update and skips any remaining worker groups when
							// detailed_info is empty — confirm the early exit is
							// intentional (a break to the next group may have
							// been meant).
							return nil
						}
						req := k8s.DeleteWorkerFromGroupRequest{
							K8SID:          cluster.ID,
							WorkersGroupID: uint64(wgNew["id"].(int)),
							WorkerID:       uint64(detailedInfo[i].(map[string]interface{})["compute_id"].(int)),
						}
						_, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req)
						if err != nil {
							return err
						}
					}
				}
			}
		}
	}
	return nil
}
// deleteWGs removes every worker group listed in deleted from the cluster,
// stopping at the first API error.
func deleteWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, deleted []interface{}) error {
	for _, elem := range deleted {
		wg := elem.(map[string]interface{})
		req := k8s.WorkersGroupDeleteRequest{
			K8SID:          cluster.ID,
			WorkersGroupID: uint64(wg["id"].(int)),
		}
		if _, err := c.CloudAPI().K8S().WorkersGroupDelete(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// addWGs creates every worker group listed in added on the cluster, passing
// through sizing, SEP placement and the optional labels/annotations/taints.
func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, added []interface{}) error {
	// toStrings copies an optional list attribute into a []string; a missing
	// or empty attribute yields nil, just like appending to a nil slice.
	toStrings := func(raw interface{}) []string {
		items, _ := raw.([]interface{})
		var out []string
		for _, item := range items {
			out = append(out, item.(string))
		}
		return out
	}

	for _, elem := range added {
		wg := elem.(map[string]interface{})
		req := k8s.WorkersGroupAddRequest{
			K8SID:         cluster.ID,
			Name:          wg["name"].(string),
			WorkerSEPID:   uint64(wg["sep_id"].(int)),
			WorkerSEPPool: wg["sep_pool"].(string),
			WorkerNum:     uint64(wg["num"].(int)),
			WorkerCPU:     uint64(wg["cpu"].(int)),
			WorkerRAM:     uint64(wg["ram"].(int)),
			WorkerDisk:    uint64(wg["disk"].(int)),
			Labels:        toStrings(wg["labels"]),
			Annotations:   toStrings(wg["annotations"]),
			Taints:        toStrings(wg["taints"]),
		}
		if _, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
k8sID, _ := strconv.ParseUint(d.Id(), 10, 64)
var k8sID uint64
if d.Id() != "" {
k8sID, _ = strconv.ParseUint(d.Id(), 10, 64)
} else {
k8sID = uint64(d.Get("k8s_id").(int))
}
req := k8s.GetRequest{
K8SID: k8sID,
}
@@ -112,3 +245,29 @@ func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceD
return k8sList, nil
}
// workersDifference returns the symmetric difference of two worker-group
// lists keyed by their "id" attribute: first the elements of slice1 whose id
// is absent from slice2, then the elements of slice2 whose id is absent from
// slice1, preserving input order within each half.
func workersDifference(slice1 []interface{}, slice2 []interface{}) []interface{} {
	var diff []interface{}

	// collect appends to diff every element of from whose id does not occur
	// anywhere in against.
	collect := func(from, against []interface{}) {
		seen := make(map[interface{}]struct{}, len(against))
		for _, item := range against {
			seen[item.(map[string]interface{})["id"]] = struct{}{}
		}
		for _, item := range from {
			if _, ok := seen[item.(map[string]interface{})["id"]]; !ok {
				diff = append(diff, item)
			}
		}
	}

	collect(slice1, slice2)
	collect(slice2, slice1)
	return diff
}