gos_tech_4.4.3 4.0.2
Tim Tkachev 2 years ago
parent 523d96189f
commit 9cf150437d

@@ -1,5 +1,10 @@
## Version 4.0.1
## Version 4.0.2
## Features
- Added new data source decort_k8s_computes that lists all VMs in a k8s cluster
- Added the ability to manage worker groups inside the decort_k8s resource
- Added new required field name to the workers block
- Added new optional fields (labels, annotations, taints) to the workers block
## Bug Fixes
- Fixed incorrect state reading when creating/deleting port_forwarding in decort_kvmvm
- Fixed possible segmentation fault with multiple concurrent resource manipulations
- Fixed incorrect state reading in resource decort_k8s
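Below is a minimal configuration sketch of the reworked workers block and the new data source, assembled from the examples in this commit (names, IDs and sizes are illustrative):

resource "decort_k8s" "cluster" {
  name           = "k8s-cluster"
  k8sci_id       = 9
  network_plugin = "flannel"
  wg_name        = "workers"   # default worker group created with the cluster

  # the first workers block must describe the group created with the cluster
  workers {
    name = "workers"
    num  = 1
    cpu  = 2
    ram  = 4096
    disk = 10
  }

  # additional groups may set the new optional fields directly
  workers {
    name        = "additional_wg"   # new required field
    num         = 2
    cpu         = 2
    ram         = 4096
    disk        = 10
    labels      = ["key1=val1"]
    annotations = ["key1=val1"]
    taints      = ["key1=val1:NoSchedule"]
  }
}

# new in 4.0.2: lists all VMs (masters and workers) of the cluster
data "decort_k8s_computes" "computes" {
  k8s_id = decort_k8s.cluster.id   # or a literal cluster ID
}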

@@ -8,7 +8,7 @@ ZIPDIR = ./zip
BINARY=${NAME}.exe
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.0.1
VERSION=4.0.2
#OS_ARCH=darwin_amd64
OS_ARCH=windows_amd64
#OS_ARCH=linux_amd64

@@ -63,6 +63,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
"decort_k8s_wg": k8s.DataSourceK8sWg(),
"decort_k8s_wg_list": k8s.DataSourceK8sWgList(),
"decort_k8s_computes": k8s.DataSourceK8sComputes(),
"decort_vins": vins.DataSourceVins(),
"decort_vins_list": vins.DataSourceVinsList(),
"decort_vins_audits": vins.DataSourceVinsAudits(),

@@ -0,0 +1,123 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
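// dataSourceK8sComputesRead looks up the cluster via utilityK8sCheckPresence and
// flattens its master and worker computes into the data source state.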
func dataSourceK8sComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(fmt.Sprint(cluster.ID))
flattenK8sDataComputes(d, cluster)
return nil
}
func computesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"group_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
}
}
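// workerComputesSchemaMake returns an empty schema; it appears to be an unused placeholder in this commit.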
func workerComputesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{}
}
func dataSourceK8sComputesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"k8s_id": {
Type: schema.TypeInt,
Required: true,
},
"masters": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computesSchemaMake(),
},
},
"workers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computesSchemaMake(),
},
},
}
}
func DataSourceK8sComputes() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceK8sComputesRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: dataSourceK8sComputesSchemaMake(),
}
}

@@ -38,6 +38,46 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
)
func flattenK8sDataComputes(d *schema.ResourceData, cluster *k8s.RecordK8S) {
d.Set("k8s_id", cluster.ID)
d.Set("masters", flattenMasterComputes(cluster))
d.Set("workers", flattenWorkerComputes(cluster))
}
func flattenMasterComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, comp := range cluster.K8SGroups.Masters.DetailedInfo {
temp := map[string]interface{}{
"id": comp.ID,
"name": comp.Name,
"status": comp.Status,
"tech_status": comp.TechStatus,
"group_name": cluster.K8SGroups.Masters.Name,
}
res = append(res, temp)
}
return res
}
func flattenWorkerComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, wg := range cluster.K8SGroups.Workers {
for _, comp := range wg.DetailedInfo {
temp := map[string]interface{}{
"id": comp.ID,
"name": comp.Name,
"status": comp.Status,
"tech_status": comp.TechStatus,
"group_name": wg.Name,
}
res = append(res, temp)
}
}
return res
}
func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, acl := range aclList {

@@ -39,12 +39,16 @@ import (
)
type K8sNodeRecord struct {
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
// coming in future updates (curr. version 4.0.2)
// Labels []interface{} `json:"labels"`
// Annotations []interface{} `json:"annotations"`
// Taints []interface{} `json:"taints"`
DetailedInfo []struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -53,7 +57,7 @@ type K8sNodeRecord struct {
SepPool string `json:"SepPool"`
}
//K8sRecord represents k8s instance
// K8sRecord represents k8s instance
type K8sRecord struct {
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
@@ -72,7 +76,7 @@ type K8sRecord struct {
type K8sRecordList []K8sRecord
//LbRecord represents load balancer instance
// LbRecord represents load balancer instance
type LbRecord struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -87,7 +91,7 @@ type LbRecord struct {
} `json:"primaryNode"`
}
//Blasphemous workaround for parsing Result value
// Blasphemous workaround for parsing Result value
type TaskResult int
func (r *TaskResult) UnmarshalJSON(b []byte) error {
@@ -117,7 +121,7 @@ func (r *TaskResult) UnmarshalJSON(b []byte) error {
return nil
}
//AsyncTask represents a long task completion status
// AsyncTask represents a long task completion status
type AsyncTask struct {
AuditID string `json:"auditId"`
Completed bool `json:"completed"`
@@ -136,7 +140,7 @@ type SshKeyConfig struct {
UserShell string
}
//FromSDK
// FromSDK
type K8SGroup struct {
Annotations []string `json:"annotations"`
CPU uint64 `json:"cpu"`

@@ -52,7 +52,7 @@ func nodeWorkerDefault() K8sNodeRecord {
}
}
func parseNode(nodeList []interface{}) K8sNodeRecord {
func parseDefaultNode(nodeList []interface{}) K8sNodeRecord {
node := nodeList[0].(map[string]interface{})
return K8sNodeRecord{
@@ -65,45 +65,6 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
}
}
func nodeToResource(node K8sNodeRecord) []interface{} {
mp := make(map[string]interface{})
mp["num"] = node.Num
mp["cpu"] = node.Cpu
mp["ram"] = node.Ram
mp["disk"] = node.Disk
return []interface{}{mp}
}
func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"num": {
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
},
"cpu": {
Type: schema.TypeInt,
Required: true,
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
Description: "Node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Required: true,
Description: "Node boot disk size in GB.",
},
}
}
func mastersSchemaMake() map[string]*schema.Schema {
masters := masterGroupSchemaMake()
masters["num"] = &schema.Schema{
@@ -137,41 +98,78 @@ func mastersSchemaMake() map[string]*schema.Schema {
ForceNew: true,
Description: "Node boot disk size in GB.",
}
return masters
}
func workersSchemaMake() map[string]*schema.Schema {
workers := k8sGroupListSchemaMake()
workers["num"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
}
workers["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
workers["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
workers["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node CPU count.",
}
workers["ram"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node RAM in MB.",
}
workers["disk"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node boot disk size in GB.",
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"num": {
Type: schema.TypeInt,
Required: true,
},
"ram": {
Type: schema.TypeInt,
Required: true,
},
"cpu": {
Type: schema.TypeInt,
Required: true,
},
"disk": {
Type: schema.TypeInt,
Required: true,
},
"annotations": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"detailed_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: detailedInfoSchemaMake(),
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"labels": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
},
"sep_pool": {
Type: schema.TypeString,
Optional: true,
},
}
return workers
}

@@ -95,7 +95,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var masterNode K8sNodeRecord
if masters, ok := d.GetOk("masters"); ok {
masterNode = parseNode(masters.([]interface{}))
masterNode = parseDefaultNode(masters.([]interface{}))
} else {
masterNode = nodeMasterDefault()
}
@@ -108,7 +108,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok {
workerNode = parseNode(workers.([]interface{}))
workerNode = parseDefaultNode(workers.([]interface{}))
} else {
workerNode = nodeWorkerDefault()
}
@@ -421,31 +421,9 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
if d.HasChange("workers") {
wg := cluster.K8SGroups.Workers[0]
newWorkers := parseNode(d.Get("workers").([]interface{}))
if uint64(newWorkers.Num) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
Num: uint64(newWorkers.Num - int(wg.Num)),
}
if _, err := c.CloudAPI().K8S().WorkerAdd(ctx, req); err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newWorkers.Num; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
WorkerID: wg.DetailedInfo[i].ID,
}
if _, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req); err != nil {
return diag.FromErr(err)
}
}
err := handleWorkersChange(ctx, d, c, cluster)
if err != nil {
return diag.FromErr(err)
}
}
@@ -564,7 +542,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: workersSchemaMake(),
},
@@ -696,10 +673,10 @@ func ResourceK8s() *schema.Resource {
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout30m,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
Read: &constants.Timeout600s,
Update: &constants.Timeout600s,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceK8sSchemaMake(),

@@ -42,10 +42,143 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
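// handleWorkersChange reconciles the workers blocks in the plan with the cluster's
// actual worker groups: groups absent from the new set are deleted, newly declared
// groups are added, and node counts of groups present in both sets are updated.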
func handleWorkersChange(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, cluster *k8s.RecordK8S) error {
o, n := d.GetChange("workers")
old_set, _ := o.([]interface{})
new_set, _ := n.([]interface{})
old_len := len(old_set)
new_len := len(new_set)
if old_len > new_len {
deleted := workersDifference(old_set, new_set)
if err := deleteWGs(ctx, c, cluster, deleted); err != nil {
return err
}
} else if old_len < new_len {
added := workersDifference(old_set, new_set)
if err := addWGs(ctx, c, cluster, added); err != nil {
return err
}
}
if err := updateNum(ctx, c, cluster, old_set, new_set); err != nil {
return err
}
return nil
}
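// updateNum matches old and new worker groups by id and scales each group to the
// requested node count, adding workers in bulk or deleting them one by one from the
// highest index down.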
func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, old_set []interface{}, new_set []interface{}) error {
for _, valOld := range old_set {
wgOld, _ := valOld.(map[string]interface{})
for _, valNew := range new_set {
wgNew, _ := valNew.(map[string]interface{})
if wgOld["id"] == wgNew["id"] {
oldNum := wgOld["num"].(int)
newNum := wgNew["num"].(int)
if oldNum < newNum {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
Num: uint64(newNum - oldNum),
}
_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
if err != nil {
return err
}
} else if oldNum > newNum {
for i := oldNum - 1; i >= newNum; i-- {
detailedInfo := wgOld["detailed_info"].([]interface{})
if len(detailedInfo) == 0 {
return nil
}
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
WorkerID: uint64(detailedInfo[i].(map[string]interface{})["compute_id"].(int)),
}
_, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req)
if err != nil {
return err
}
}
}
}
}
}
return nil
}
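// deleteWGs removes each worker group in the deleted set from the cluster.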
func deleteWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, deleted []interface{}) error {
for _, elem := range deleted {
found_wg := elem.(map[string]interface{})
req := k8s.WorkersGroupDeleteRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(found_wg["id"].(int)),
}
_, err := c.CloudAPI().K8S().WorkersGroupDelete(ctx, req)
if err != nil {
return err
}
}
return nil
}
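// addWGs creates each newly declared worker group, passing along its optional
// labels, annotations and taints.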
func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, added []interface{}) error {
for _, elem := range added {
found_wg := elem.(map[string]interface{})
req := k8s.WorkersGroupAddRequest{
K8SID: cluster.ID,
Name: found_wg["name"].(string),
WorkerSEPID: uint64(found_wg["sep_id"].(int)),
WorkerSEPPool: found_wg["sep_pool"].(string),
WorkerNum: uint64(found_wg["num"].(int)),
WorkerCPU: uint64(found_wg["cpu"].(int)),
WorkerRAM: uint64(found_wg["ram"].(int)),
WorkerDisk: uint64(found_wg["disk"].(int)),
}
labels, _ := found_wg["labels"].([]interface{})
for _, label := range labels {
req.Labels = append(req.Labels, label.(string))
}
annotations, _ := found_wg["annotations"].([]interface{})
for _, annotation := range annotations {
req.Annotations = append(req.Annotations, annotation.(string))
}
taints, _ := found_wg["taints"].([]interface{})
for _, taint := range taints {
req.Taints = append(req.Taints, taint.(string))
}
_, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, req)
if err != nil {
return err
}
}
return nil
}
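// utilityK8sCheckPresence fetches the cluster record either by the resource ID or,
// when called from a data source with an empty ID, by the k8s_id attribute.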
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
k8sID, _ := strconv.ParseUint(d.Id(), 10, 64)
var k8sID uint64
if d.Id() != "" {
k8sID, _ = strconv.ParseUint(d.Id(), 10, 64)
} else {
k8sID = uint64(d.Get("k8s_id").(int))
}
req := k8s.GetRequest{
K8SID: k8sID,
}
@@ -112,3 +245,29 @@ func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceD
return k8sList, nil
}
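// workersDifference returns the symmetric difference of two worker-group lists,
// comparing elements by their id; the second pass swaps the slices so that elements
// unique to either list are collected.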
func workersDifference(slice1 []interface{}, slice2 []interface{}) []interface{} {
var diff []interface{}
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1.(map[string]interface{})["id"] == s2.(map[string]interface{})["id"] {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}

@@ -0,0 +1,31 @@
#Uncomment this code
#and adjust the version and path as needed
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
source = "terraform.local/local/decort"
version = "<VERSION>"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
oauth2_url = "https://sso.digitalenergy.online"
controller_url = "https://mr4.digitalenergy.online"
app_id = ""
app_secret = ""
}
data "decort_k8s_computes" "computes" {
# cluster ID
# required parameter
k8s_id = 999
}
output "computes_out" {
value = data.decort_k8s_computes.computes
}

@@ -1,15 +1,3 @@
/*
Example of using
the k8s cluster resource.
The resource allows you to:
1. Create
2. Edit
3. Delete
*/
#Uncomment this code
#and adjust the version and path as needed
#to work with a provider installed manually (not via the hashicorp provider registry)
@@ -18,7 +6,7 @@ terraform {
required_providers {
decort = {
source = "terraform.local/local/decort"
version = "1.0.0"
version = "<VERSION>"
}
}
}
@@ -32,7 +20,6 @@ provider "decort" {
app_secret = ""
}
resource "decort_k8s" "cluster" {
#cluster name
#required parameter
@@ -50,11 +37,34 @@ resource "decort_k8s" "cluster" {
#type - number
k8sci_id = 9
#network plugin
#required parameter
#type - string
network_plugin = "flannel"
#name for the first worker group created in the cluster
#required parameter
#type - string
wg_name = "workers"
# list of labels for the default worker group
# optional parameter
# the labels, annotations, and taints parameters will soon be moved entirely into the workers block
# type - array of strings
labels = ["key1=val1", "key2=val2"]
# list of annotations for the default worker group
# optional parameter
# the labels, annotations, and taints parameters will soon be moved entirely into the workers block
# type - array of strings
annotations = ["key1=val1", "key2=val2"]
# list of taints for the default worker group
# optional parameter
# the labels, annotations, and taints parameters will soon be moved entirely into the workers block
# type - array of strings
taints = ["key1=val1", "key2=val2"]
#master node(s) configuration
#optional parameter
#maximum number of elements - 1
@@ -83,11 +93,16 @@ resource "decort_k8s" "cluster" {
disk = 10
}
#worker node(s) configuration
#worker group configuration
#optional parameter
#maximum number of elements - 1
#type - list of nodes
#The first worker group listed must match the one originally created with the cluster.
# labels, annotations, and taints for the default worker group are set at the resource root when the cluster is created.
workers {
#worker group name
#required parameter
#type - string
name = "workers_wg"
#number of nodes
#required parameter
#type - number
@@ -107,6 +122,70 @@ resource "decort_k8s" "cluster" {
#required parameter
#type - number
disk = 10
#SEP ID
#optional parameter
#type - number
sep_id = 1010
#SEP pool name
#optional parameter
#type - string
sep_pool = "data01"
}
#...Any number of additional worker groups can be created below
# labels, annotations, and taints for subsequent groups are set directly in the workers block
workers {
#worker group name
#required parameter
#type - string
name = "additional_wg"
#number of nodes
#required parameter
#type - number
num = 2
#number of CPUs
#required parameter
#type - number
cpu = 2
#amount of RAM in MB
#required parameter
#type - number
ram = 4096
#disk size in GB
#required parameter
#type - number
disk = 10
#SEP ID
#optional parameter
#type - number
sep_id = 1010
#SEP pool name
#optional parameter
#type - string
sep_pool = "data01"
#list of labels
#optional parameter
#type - array of strings
labels = ["label1=value1", "label2=value2"]
#list of annotations
#optional parameter
#type - array of strings
annotations = ["key1=value1", "key2=value2"]
#list of taints
#optional parameter
#type - array of strings
taints = ["key1=value1:NoSchedule", "key2=value2:NoExecute"]
}
}
