Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 9cf150437d | |
| | 523d96189f | |
.gitignore (vendored) · 2 changes

```diff
@@ -2,4 +2,4 @@ decort/vendor/
 examples/
 url_scrapping/
 terraform-provider-decort*
-.vscode/
+.vscode/
```
CHANGELOG.md · 34 changes

```diff
@@ -1,30 +1,10 @@
-## Version 4.0.0
+## Version 4.0.2

-### Features
-- Updated provider to the newest DECORT version (3.8.6):
-  - resource_k8s now has "network_plugin" required field
-  - Added "start" argument to resource_k8s
-  - data_source_bservice, resource_bservice "computes" and "groups" fields now have more in-depth information
-  - resource_compute now has full boot disk information
-  - data_source_extnet now has additional field "e_burst" inside "default_qos" substructure
-  - data_source_rg_list, data_source_rg, resource_rg, resource_account and data_source_account now have two additional fields:
-    - cpu_allocation_parameter
-    - cpu_allocation_ratio
-  - unnecessary fields were removed from all disks data sources && resources (except for unattached disks), to be exact:
-    - boot_partition
-    - guid
-    - disk_path
-    - iqn
-    - login
-    - milestones
-    - password
-    - purge_attempts
-    - reality_device_number
-    - reference_id
-- Removed automatic switch between cloudbroker/cloudapi (admin/user modes)
+## Features
+- Added new data source decort_k8s_computes used to list all VMs in k8s cluster
+- Added the ability to manipulate worker-groups inside resource decort_k8s
+  - Added new required field name to workers block
+  - Added new optional fields (labels, annotations, taints) to workers block
+
+## Bug Fix
+- Fixed resource_compute error when creating a new instance with EXTNET type network
+- Fixed quota record marshalling in decort_resgroup resource
+- Fixed GID validation for multiple resources
+- Fixed possible 'addUser' error when importing resource_account with already added user
+- Fixed incorrect state reading in resource decort_k8s
```
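The two headline additions in this changelog, the decort_k8s_computes data source and named worker groups in decort_k8s, are exercised by the new sample files further down this compare. A minimal sketch of the data source (the cluster ID 999 is a placeholder):

```hcl
# Minimal sketch; k8s_id is a placeholder, see samples/cloudapi/data_k8s_computes below.
data "decort_k8s_computes" "computes" {
  k8s_id = 999 # required: ID of an existing k8s cluster
}

# masters and workers are computed lists of objects with
# id, name, group_name, status and tech_status fields.
output "computes_out" {
  value = data.decort_k8s_computes.computes
}
```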
Makefile · 2 changes

```diff
@@ -8,7 +8,7 @@ ZIPDIR = ./zip
 BINARY=${NAME}.exe
 WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
 MAINPATH = ./cmd/decort/
-VERSION=4.0.0
+VERSION=4.0.2
 #OS_ARCH=darwin_amd64
 OS_ARCH=windows_amd64
 #OS_ARCH=linux_amd64
```
go.mod · 2 changes

```diff
@@ -9,7 +9,7 @@ require (
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
 	github.com/sirupsen/logrus v1.9.0
 	golang.org/x/net v0.5.0
-	repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0
+	repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3
 )

 require (
```
go.sum · 4 changes

```diff
@@ -332,5 +332,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0 h1:y8F3bzEb8mROEYnoG495AxpbSTEkfSpFh97sZe42cJE=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0/go.mod h1:YP57mpXh60xeRERVaCehn+l0S7Qe24trVll1EvrKzds=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3 h1:jrBl90lRfp34bE3m30N3mYIDTSlaPySuo+pE7bK4eOI=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.4.3/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=
```
```diff
@@ -63,6 +63,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
 		"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
 		"decort_k8s_wg":           k8s.DataSourceK8sWg(),
 		"decort_k8s_wg_list":      k8s.DataSourceK8sWgList(),
+		"decort_k8s_computes":     k8s.DataSourceK8sComputes(),
 		"decort_vins":             vins.DataSourceVins(),
 		"decort_vins_list":        vins.DataSourceVinsList(),
 		"decort_vins_audits":      vins.DataSourceVinsAudits(),
```
internal/service/cloudapi/k8s/data_source_k8s_computes.go · new file, 123 lines

```diff
@@ -0,0 +1,123 @@
+/*
+Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
+Authors:
+Petr Krutov, <petr.krutov@digitalenergy.online>
+Stanislav Solovev, <spsolovev@digitalenergy.online>
+Kasim Baybikov, <kmbaybikov@basistech.ru>
+Tim Tkachev, <tvtkachev@basistech.ru>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
+Orchestration Technology) with Terraform by Hashicorp.
+
+Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
+
+Please see README.md to learn where to place source code so that it
+builds seamlessly.
+
+Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
+*/
+
+package k8s
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
+)
+
+func dataSourceK8sComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	cluster, err := utilityK8sCheckPresence(ctx, d, m)
+	if err != nil {
+		d.SetId("")
+		return diag.FromErr(err)
+	}
+
+	d.SetId(fmt.Sprint(cluster.ID))
+	flattenK8sDataComputes(d, cluster)
+
+	return nil
+}
+
+func computesSchemaMake() map[string]*schema.Schema {
+	return map[string]*schema.Schema{
+		"id": {
+			Type:     schema.TypeInt,
+			Computed: true,
+		},
+		"name": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+		"group_name": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+		"status": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+		"tech_status": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+	}
+}
+
+func workerComputesSchemaMake() map[string]*schema.Schema {
+	return map[string]*schema.Schema{}
+}
+
+func dataSourceK8sComputesSchemaMake() map[string]*schema.Schema {
+	return map[string]*schema.Schema{
+		"k8s_id": {
+			Type:     schema.TypeInt,
+			Required: true,
+		},
+		"masters": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: computesSchemaMake(),
+			},
+		},
+		"workers": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: computesSchemaMake(),
+			},
+		},
+	}
+}
+
+func DataSourceK8sComputes() *schema.Resource {
+	return &schema.Resource{
+		SchemaVersion: 1,
+
+		ReadContext: dataSourceK8sComputesRead,
+
+		Timeouts: &schema.ResourceTimeout{
+			Read:    &constants.Timeout60s,
+			Default: &constants.Timeout60s,
+		},
+
+		Schema: dataSourceK8sComputesSchemaMake(),
+	}
+}
```
```diff
@@ -38,6 +38,46 @@ import (
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
 )

+func flattenK8sDataComputes(d *schema.ResourceData, cluster *k8s.RecordK8S) {
+	d.Set("k8s_id", cluster.ID)
+	d.Set("masters", flattenMasterComputes(cluster))
+	d.Set("workers", flattenWorkerComputes(cluster))
+}
+
+func flattenMasterComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
+	res := make([]map[string]interface{}, 0)
+	for _, comp := range cluster.K8SGroups.Masters.DetailedInfo {
+		temp := map[string]interface{}{
+			"id":          comp.ID,
+			"name":        comp.Name,
+			"status":      comp.Status,
+			"tech_status": comp.TechStatus,
+			"group_name":  cluster.K8SGroups.Masters.Name,
+		}
+		res = append(res, temp)
+	}
+
+	return res
+}
+
+func flattenWorkerComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
+	res := make([]map[string]interface{}, 0)
+	for _, wg := range cluster.K8SGroups.Workers {
+		for _, comp := range wg.DetailedInfo {
+			temp := map[string]interface{}{
+				"id":          comp.ID,
+				"name":        comp.Name,
+				"status":      comp.Status,
+				"tech_status": comp.TechStatus,
+				"group_name":  wg.Name,
+			}
+			res = append(res, temp)
+		}
+	}
+
+	return res
+}
+
 func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0)
 	for _, acl := range aclList {
```
```diff
@@ -39,12 +39,16 @@ import (
 )

 type K8sNodeRecord struct {
-	ID int `json:"id"`
-	Name string `json:"name"`
-	Disk int `json:"disk"`
-	Cpu int `json:"cpu"`
-	Num int `json:"num"`
-	Ram int `json:"ram"`
+	ID   int    `json:"id"`
+	Name string `json:"name"`
+	Disk int    `json:"disk"`
+	Cpu  int    `json:"cpu"`
+	Num  int    `json:"num"`
+	Ram  int    `json:"ram"`
+	// coming in future updates (curr. version 4.0.2)
+	// Labels      []interface{} `json:"labels"`
+	// Annotations []interface{} `json:"annotations"`
+	// Taints      []interface{} `json:"taints"`
 	DetailedInfo []struct {
 		ID   int    `json:"id"`
 		Name string `json:"name"`
@@ -53,7 +57,7 @@ type K8sNodeRecord struct {
 	SepPool string `json:"SepPool"`
 }

-//K8sRecord represents k8s instance
+// K8sRecord represents k8s instance
 type K8sRecord struct {
 	AccountID   int    `json:"accountId"`
 	AccountName string `json:"accountName"`
@@ -72,7 +76,7 @@ type K8sRecord struct {

 type K8sRecordList []K8sRecord

-//LbRecord represents load balancer instance
+// LbRecord represents load balancer instance
 type LbRecord struct {
 	ID   int    `json:"id"`
 	Name string `json:"name"`
@@ -87,7 +91,7 @@ type LbRecord struct {
 	} `json:"primaryNode"`
 }

-//Blasphemous workaround for parsing Result value
+// Blasphemous workaround for parsing Result value
 type TaskResult int

 func (r *TaskResult) UnmarshalJSON(b []byte) error {
@@ -117,7 +121,7 @@ func (r *TaskResult) UnmarshalJSON(b []byte) error {
 	return nil
 }

-//AsyncTask represents a long task completion status
+// AsyncTask represents a long task completion status
 type AsyncTask struct {
 	AuditID   string `json:"auditId"`
 	Completed bool   `json:"completed"`
@@ -136,7 +140,7 @@ type SshKeyConfig struct {
 	UserShell string
 }

-//FromSDK
+// FromSDK
 type K8SGroup struct {
 	Annotations []string `json:"annotations"`
 	CPU         uint64   `json:"cpu"`
```
```diff
@@ -52,7 +52,7 @@ func nodeWorkerDefault() K8sNodeRecord {
 	}
 }

-func parseNode(nodeList []interface{}) K8sNodeRecord {
+func parseDefaultNode(nodeList []interface{}) K8sNodeRecord {
 	node := nodeList[0].(map[string]interface{})

 	return K8sNodeRecord{
@@ -65,45 +65,6 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
 	}
 }

-func nodeToResource(node K8sNodeRecord) []interface{} {
-	mp := make(map[string]interface{})
-
-	mp["num"] = node.Num
-	mp["cpu"] = node.Cpu
-	mp["ram"] = node.Ram
-	mp["disk"] = node.Disk
-
-	return []interface{}{mp}
-}
-
-func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
-	return map[string]*schema.Schema{
-		"num": {
-			Type:        schema.TypeInt,
-			Required:    true,
-			Description: "Number of nodes to create.",
-		},
-
-		"cpu": {
-			Type:        schema.TypeInt,
-			Required:    true,
-			Description: "Node CPU count.",
-		},
-
-		"ram": {
-			Type:        schema.TypeInt,
-			Required:    true,
-			Description: "Node RAM in MB.",
-		},
-
-		"disk": {
-			Type:        schema.TypeInt,
-			Required:    true,
-			Description: "Node boot disk size in GB.",
-		},
-	}
-}
-
 func mastersSchemaMake() map[string]*schema.Schema {
 	masters := masterGroupSchemaMake()
 	masters["num"] = &schema.Schema{
@@ -137,41 +98,78 @@ func mastersSchemaMake() map[string]*schema.Schema {
 		ForceNew:    true,
 		Description: "Node boot disk size in GB.",
 	}

 	return masters
 }

 func workersSchemaMake() map[string]*schema.Schema {
-	workers := k8sGroupListSchemaMake()
-	workers["num"] = &schema.Schema{
-		Type:        schema.TypeInt,
-		Required:    true,
-		Description: "Number of nodes to create.",
-	}
-	workers["sep_id"] = &schema.Schema{
-		Type:     schema.TypeInt,
-		Optional: true,
-	}
-	workers["sep_pool"] = &schema.Schema{
-		Type:     schema.TypeString,
-		Optional: true,
-	}
-	workers["cpu"] = &schema.Schema{
-		Type:        schema.TypeInt,
-		Required:    true,
-		ForceNew:    true,
-		Description: "Node CPU count.",
-	}
-	workers["ram"] = &schema.Schema{
-		Type:        schema.TypeInt,
-		Required:    true,
-		ForceNew:    true,
-		Description: "Node RAM in MB.",
-	}
-	workers["disk"] = &schema.Schema{
-		Type:        schema.TypeInt,
-		Required:    true,
-		ForceNew:    true,
-		Description: "Node boot disk size in GB.",
-	}
-	return workers
+	return map[string]*schema.Schema{
+		"name": {
+			Type:     schema.TypeString,
+			Required: true,
+		},
+		"num": {
+			Type:     schema.TypeInt,
+			Required: true,
+		},
+		"ram": {
+			Type:     schema.TypeInt,
+			Required: true,
+		},
+		"cpu": {
+			Type:     schema.TypeInt,
+			Required: true,
+		},
+		"disk": {
+			Type:     schema.TypeInt,
+			Required: true,
+		},
+		"annotations": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"detailed_info": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: detailedInfoSchemaMake(),
+			},
+		},
+		"guid": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+		"id": {
+			Type:     schema.TypeInt,
+			Computed: true,
+		},
+		"labels": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"taints": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"sep_id": {
+			Type:     schema.TypeInt,
+			Optional: true,
+		},
+		"sep_pool": {
+			Type:     schema.TypeString,
+			Optional: true,
+		},
+	}
 }
```
```diff
@@ -95,7 +95,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{

 	var masterNode K8sNodeRecord
 	if masters, ok := d.GetOk("masters"); ok {
-		masterNode = parseNode(masters.([]interface{}))
+		masterNode = parseDefaultNode(masters.([]interface{}))
 	} else {
 		masterNode = nodeMasterDefault()
 	}
@@ -108,7 +108,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{

 	var workerNode K8sNodeRecord
 	if workers, ok := d.GetOk("workers"); ok {
-		workerNode = parseNode(workers.([]interface{}))
+		workerNode = parseDefaultNode(workers.([]interface{}))
 	} else {
 		workerNode = nodeWorkerDefault()
 	}
@@ -217,7 +217,7 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
 		return diag.FromErr(err)
 	}

-	enableReq := k8s.DisabelEnableRequest{
+	enableReq := k8s.DisableEnableRequest{
 		K8SID: id,
 	}

@@ -374,7 +374,7 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
 		return diag.FromErr(err)
 	}

-	enableReq := k8s.DisabelEnableRequest{
+	enableReq := k8s.DisableEnableRequest{
 		K8SID: id,
 	}

@@ -421,31 +421,9 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
 	}

 	if d.HasChange("workers") {
-		wg := cluster.K8SGroups.Workers[0]
-
-		newWorkers := parseNode(d.Get("workers").([]interface{}))
-
-		if uint64(newWorkers.Num) > wg.Num {
-			req := k8s.WorkerAddRequest{
-				K8SID:          cluster.ID,
-				WorkersGroupID: wg.ID,
-				Num:            uint64(newWorkers.Num - int(wg.Num)),
-			}
-
-			if _, err := c.CloudAPI().K8S().WorkerAdd(ctx, req); err != nil {
-				return diag.FromErr(err)
-			}
-		} else {
-			for i := int(wg.Num) - 1; i >= newWorkers.Num; i-- {
-				req := k8s.DeleteWorkerFromGroupRequest{
-					K8SID:          cluster.ID,
-					WorkersGroupID: wg.ID,
-					WorkerID:       wg.DetailedInfo[i].ID,
-				}
-				if _, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req); err != nil {
-					return diag.FromErr(err)
-				}
-			}
-		}
+		err := handleWorkersChange(ctx, d, c, cluster)
+		if err != nil {
+			return diag.FromErr(err)
+		}
 	}

@@ -564,7 +542,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
 			Type:     schema.TypeList,
 			Optional: true,
 			Computed: true,
-			MaxItems: 1,
 			Elem: &schema.Resource{
 				Schema: workersSchemaMake(),
 			},
@@ -696,10 +673,10 @@ func ResourceK8s() *schema.Resource {

 		Timeouts: &schema.ResourceTimeout{
 			Create:  &constants.Timeout30m,
-			Read:    &constants.Timeout300s,
-			Update:  &constants.Timeout300s,
-			Delete:  &constants.Timeout300s,
-			Default: &constants.Timeout300s,
+			Read:    &constants.Timeout600s,
+			Update:  &constants.Timeout600s,
+			Delete:  &constants.Timeout600s,
+			Default: &constants.Timeout600s,
 		},

 		Schema: resourceK8sSchemaMake(),
```
```diff
@@ -60,11 +60,28 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{

 	c := m.(*controller.ControllerCfg)
 	req := k8s.WorkersGroupAddRequest{
-		K8SID:     uint64(d.Get("k8s_id").(int)),
-		Name:      d.Get("name").(string),
-		WorkerNum: uint64(d.Get("num").(int)),
-		WorkerCPU: uint64(d.Get("cpu").(int)),
-		WorkerRAM: uint64(d.Get("ram").(int)),
+		K8SID:         uint64(d.Get("k8s_id").(int)),
+		Name:          d.Get("name").(string),
+		WorkerNum:     uint64(d.Get("num").(int)),
+		WorkerCPU:     uint64(d.Get("cpu").(int)),
+		WorkerRAM:     uint64(d.Get("ram").(int)),
+		WorkerSEPID:   uint64(d.Get("worker_sep_id").(int)),
+		WorkerSEPPool: d.Get("worker_sep_pool").(string),
 	}

+	labels, _ := d.Get("labels").([]interface{})
+	for _, label := range labels {
+		req.Labels = append(req.Labels, label.(string))
+	}
+
+	annotations, _ := d.Get("annotations").([]interface{})
+	for _, annotation := range annotations {
+		req.Annotations = append(req.Annotations, annotation.(string))
+	}
+
+	taints, _ := d.Get("taints").([]interface{})
+	for _, taint := range taints {
+		req.Taints = append(req.Taints, taint.(string))
+	}
+
 	if d.Get("disk") == nil {
@@ -111,6 +128,7 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
 	} else {
 		d.Set("k8s_id", d.Get("k8s_id"))
 	}

+	d.SetId(strings.Split(d.Id(), "#")[0])

 	flattenWg(d, *wg, workersComputeList)
@@ -234,6 +252,38 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
 			Computed:    true,
 			Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
 		},
+		"labels": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"annotations": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"taints": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeString,
+			},
+		},
+		"worker_sep_id": {
+			Type:     schema.TypeInt,
+			Optional: true,
+		},
+		"worker_sep_pool": {
+			Type:     schema.TypeString,
+			Optional: true,
+		},
 		"wg_id": {
 			Type:     schema.TypeInt,
 			Computed: true,
@@ -246,31 +296,10 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
 				Schema: detailedInfoSchemaMake(),
 			},
 		},
-		"labels": {
-			Type:     schema.TypeList,
-			Computed: true,
-			Elem: &schema.Schema{
-				Type: schema.TypeString,
-			},
-		},
 		"guid": {
 			Type:     schema.TypeString,
 			Computed: true,
 		},
-		"annotations": {
-			Type:     schema.TypeList,
-			Computed: true,
-			Elem: &schema.Schema{
-				Type: schema.TypeString,
-			},
-		},
-		"taints": {
-			Type:     schema.TypeList,
-			Computed: true,
-			Elem: &schema.Schema{
-				Type: schema.TypeString,
-			},
-		},
 	}
 }
@@ -289,8 +318,8 @@ func ResourceK8sWg() *schema.Resource {

 		Timeouts: &schema.ResourceTimeout{
 			Create:  &constants.Timeout600s,
-			Read:    &constants.Timeout300s,
-			Update:  &constants.Timeout300s,
+			Read:    &constants.Timeout600s,
+			Update:  &constants.Timeout600s,
 			Delete:  &constants.Timeout300s,
 			Default: &constants.Timeout300s,
 		},
```
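The labels, annotations, taints and SEP arguments added to resourceK8sWgSchemaMake above surface directly in configuration. A minimal sketch, assuming the resource is registered as decort_k8s_wg (mirroring the decort_k8s_wg data source name) and using placeholder IDs:

```hcl
resource "decort_k8s_wg" "extra" {
  k8s_id = 999        # placeholder: ID of an existing cluster
  name   = "extra_wg"
  num    = 2
  cpu    = 2
  ram    = 4096

  # new arguments from this compare
  worker_sep_id   = 1010
  worker_sep_pool = "data01"
  labels          = ["env=dev"]
  annotations     = ["owner=team-a"]
  taints          = ["dedicated=gpu:NoSchedule"]
}
```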
```diff
@@ -42,10 +42,143 @@ import (
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
 )

+func handleWorkersChange(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, cluster *k8s.RecordK8S) error {
+	o, n := d.GetChange("workers")
+	old_set, _ := o.([]interface{})
+	new_set, _ := n.([]interface{})
+	old_len := len(old_set)
+	new_len := len(new_set)
+
+	if old_len > new_len {
+		deleted := workersDifference(old_set, new_set)
+		if err := deleteWGs(ctx, c, cluster, deleted); err != nil {
+			return err
+		}
+	} else if old_len < new_len {
+		added := workersDifference(old_set, new_set)
+		if err := addWGs(ctx, c, cluster, added); err != nil {
+			return err
+		}
+	}
+
+	if err := updateNum(ctx, c, cluster, old_set, new_set); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, old_set []interface{}, new_set []interface{}) error {
+	for _, valOld := range old_set {
+		wgOld, _ := valOld.(map[string]interface{})
+		for _, valNew := range new_set {
+			wgNew, _ := valNew.(map[string]interface{})
+			if wgOld["id"] == wgNew["id"] {
+				oldNum := wgOld["num"].(int)
+				newNum := wgNew["num"].(int)
+
+				if oldNum < newNum {
+					req := k8s.WorkerAddRequest{
+						K8SID:          cluster.ID,
+						WorkersGroupID: uint64(wgNew["id"].(int)),
+						Num:            uint64(newNum - oldNum),
+					}
+
+					_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
+					if err != nil {
+						return err
+					}
+				} else if oldNum > newNum {
+					for i := oldNum - 1; i >= newNum; i-- {
+						detailedInfo := wgOld["detailed_info"].([]interface{})
+						if len(detailedInfo) == 0 {
+							return nil
+						}
+
+						req := k8s.DeleteWorkerFromGroupRequest{
+							K8SID:          cluster.ID,
+							WorkersGroupID: uint64(wgNew["id"].(int)),
+							WorkerID:       uint64(detailedInfo[i].(map[string]interface{})["compute_id"].(int)),
+						}
+
+						_, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req)
+						if err != nil {
+							return err
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func deleteWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, deleted []interface{}) error {
+	for _, elem := range deleted {
+		found_wg := elem.(map[string]interface{})
+		req := k8s.WorkersGroupDeleteRequest{
+			K8SID:          cluster.ID,
+			WorkersGroupID: uint64(found_wg["id"].(int)),
+		}
+
+		_, err := c.CloudAPI().K8S().WorkersGroupDelete(ctx, req)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, added []interface{}) error {
+	for _, elem := range added {
+		found_wg := elem.(map[string]interface{})
+		req := k8s.WorkersGroupAddRequest{
+			K8SID:         cluster.ID,
+			Name:          found_wg["name"].(string),
+			WorkerSEPID:   uint64(found_wg["sep_id"].(int)),
+			WorkerSEPPool: found_wg["sep_pool"].(string),
+			WorkerNum:     uint64(found_wg["num"].(int)),
+			WorkerCPU:     uint64(found_wg["cpu"].(int)),
+			WorkerRAM:     uint64(found_wg["ram"].(int)),
+			WorkerDisk:    uint64(found_wg["disk"].(int)),
+		}
+
+		labels, _ := found_wg["labels"].([]interface{})
+		for _, label := range labels {
+			req.Labels = append(req.Labels, label.(string))
+		}
+
+		annotations, _ := found_wg["annotations"].([]interface{})
+		for _, annotation := range annotations {
+			req.Annotations = append(req.Annotations, annotation.(string))
+		}
+
+		taints, _ := found_wg["taints"].([]interface{})
+		for _, taint := range taints {
+			req.Taints = append(req.Taints, taint.(string))
+		}
+
+		_, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, req)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
 	c := m.(*controller.ControllerCfg)

-	k8sID, _ := strconv.ParseUint(d.Id(), 10, 64)
+	var k8sID uint64
+	if d.Id() != "" {
+		k8sID, _ = strconv.ParseUint(d.Id(), 10, 64)
+	} else {
+		k8sID = uint64(d.Get("k8s_id").(int))
+	}
 	req := k8s.GetRequest{
 		K8SID: k8sID,
 	}
@@ -112,3 +245,29 @@ func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceD

 	return k8sList, nil
 }
+
+func workersDifference(slice1 []interface{}, slice2 []interface{}) []interface{} {
+	var diff []interface{}
+
+	for i := 0; i < 2; i++ {
+		for _, s1 := range slice1 {
+			found := false
+			for _, s2 := range slice2 {
+				if s1.(map[string]interface{})["id"] == s2.(map[string]interface{})["id"] {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				diff = append(diff, s1)
+			}
+		}
+
+		if i == 0 {
+			slice1, slice2 = slice2, slice1
+		}
+	}
+
+	return diff
+}
```
```diff
@@ -373,7 +373,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
 		req := compute.PFWAddRequest{
 			ComputeID:       computeId,
 			PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
-			PublicPortEnd:   uint64(pfwItem["public_port_end"].(int)),
+			PublicPortEnd:   int64(pfwItem["public_port_end"].(int)),
 			LocalBasePort:   uint64(pfwItem["local_port"].(int)),
 			Proto:           pfwItem["proto"].(string),
 		}
@@ -630,6 +630,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 		Force: true,
 	}

+	warnings := dc.Warnings{}
+
 	oldCpu, newCpu := d.GetChange("cpu")
 	if oldCpu.(int) != newCpu.(int) {
 		resizeReq.CPU = uint64(newCpu.(int))
@@ -1059,7 +1061,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf

 			_, err := c.CloudAPI().Compute().PFWDel(ctx, req)
 			if err != nil {
-				return diag.FromErr(err)
+				warnings.Add(err)
 			}
 		}
 	}
@@ -1071,7 +1073,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 			req := compute.PFWAddRequest{
 				ComputeID:       computeRec.ID,
 				PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
-				PublicPortEnd:   uint64(pfwItem["public_port_end"].(int)),
+				PublicPortEnd:   int64(pfwItem["public_port_end"].(int)),
 				LocalBasePort:   uint64(pfwItem["local_port"].(int)),
 				Proto:           pfwItem["proto"].(string),
 			}
@@ -1311,7 +1313,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf

 	// we may reuse dataSourceComputeRead here as we maintain similarity
 	// between Compute resource and Compute data source schemas
-	return resourceComputeRead(ctx, d, m)
+	defer resourceComputeRead(ctx, d, m)
+	return warnings.Get()
 }

 func isChangeDisk(els []interface{}, el interface{}) bool {
@@ -1696,6 +1699,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {

 	"network": {
 		Type:     schema.TypeSet,
+		Computed: true,
 		Optional: true,
 		MinItems: 1,
 		MaxItems: constants.MaxNetworksPerCompute,
```

```diff
@@ -57,7 +57,7 @@ func resourcePfwCreate(ctx context.Context, d *schema.ResourceData, m interface{
 	}

 	if portEnd, ok := d.GetOk("public_port_end"); ok {
-		req.PublicPortEnd = uint64(portEnd.(int))
+		req.PublicPortEnd = int64(portEnd.(int))
 	}

 	pfwId, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
```

```diff
@@ -55,7 +55,7 @@ func resourcePfwCreate(ctx context.Context, d *schema.ResourceData, m interface{
 	req.Proto = d.Get("proto").(string)

 	if portEnd, ok := d.GetOk("public_port_end"); ok {
-		req.PublicPortEnd = uint64(portEnd.(int))
+		req.PublicPortEnd = int64(portEnd.(int))
 	}

 	pfwId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
```
samples/cloudapi/data_k8s_computes/main.tf · new file, 31 lines

```diff
@@ -0,0 +1,31 @@
+# Uncomment this block,
+# adjusting the version and path as needed,
+# to work with a provider installed manually (not from the hashicorp provider registry)
+/*
+terraform {
+  required_providers {
+    decort = {
+      source = "terraform.local/local/decort"
+      version = "<VERSION>"
+    }
+  }
+}
+*/
+
+provider "decort" {
+  authenticator = "oauth2"
+  oauth2_url = "https://sso.digitalenergy.online"
+  controller_url = "https://mr4.digitalenergy.online"
+  app_id = ""
+  app_secret = ""
+}
+
+data "decort_k8s_computes" "computes" {
+  # cluster ID
+  # required parameter
+  k8s_id = 999
+}
+
+output "computes_out" {
+  value = data.decort_k8s_computes.computes
+}
```
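Because masters and workers come back as flat lists, the output can be reshaped with ordinary HCL for expressions. A hypothetical follow-on to the sample above (assuming "STARTED" is the tech status of a running VM):

```hcl
output "running_workers_by_group" {
  # worker VM names grouped by worker group name, running VMs only
  value = {
    for c in data.decort_k8s_computes.computes.workers :
    c.group_name => c.name...
    if c.tech_status == "STARTED"
  }
}
```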
```diff
@@ -1,15 +1,3 @@
-/*
-Usage example
-for the k8s cluster resource
-The resource allows you to:
-1. Create
-2. Edit
-3. Delete
-
-*/
-
-
-
 # Uncomment this block,
 # adjusting the version and path as needed,
 # to work with a provider installed manually (not from the hashicorp provider registry)
@@ -18,7 +6,7 @@ terraform {
   required_providers {
     decort = {
       source = "terraform.local/local/decort"
-      version = "1.0.0"
+      version = "<VERSION>"
     }
   }
 }
@@ -32,7 +20,6 @@ provider "decort" {
   app_secret = ""
 }

-
 resource "decort_k8s" "cluster" {
   # cluster name
   # required parameter
@@ -50,11 +37,34 @@ resource "decort_k8s" "cluster" {
   # type: number
   k8sci_id = 9

+  # network plugin
+  # required parameter
+  # type: string
+  network_plugin = "flannel"
+
+  # name of the first worker group created with the cluster
+  # required parameter
+  # type: string
+  wg_name = "workers"
+
+  # list of labels for the default worker group
+  # optional parameter
+  # the labels, annotations and taints parameters will soon be moved entirely into the workers block
+  # type: list of strings
+  labels = ["key1=val1", "key2=val2"]
+
+  # list of annotations for the default worker group
+  # optional parameter
+  # the labels, annotations and taints parameters will soon be moved entirely into the workers block
+  # type: list of strings
+  annotations = ["key1=val1", "key2=val2"]
+
+  # list of taints for the default worker group
+  # optional parameter
+  # the labels, annotations and taints parameters will soon be moved entirely into the workers block
+  # type: list of strings
+  taints = ["key1=val1", "key2=val2"]
+
   # master node(s) configuration
   # optional parameter
   # maximum number of elements: 1
@@ -83,11 +93,16 @@ resource "decort_k8s" "cluster" {
     disk = 10
   }

-  # worker node(s) configuration
+  # worker group configuration
   # optional parameter
-  # maximum number of elements: 1
-  # type: list of nodes
+  # The first worker group specified must match the one originally created with the cluster.
+  # labels, annotations and taints for the default worker group are set at the root of the resource when the cluster is created.
   workers {
+    # worker group name
+    # required parameter
+    # type: string
+    name = "workers_wg"
+
     # number of nodes
     # required parameter
     # type: number
@@ -107,6 +122,70 @@ resource "decort_k8s" "cluster" {
     # required parameter
     # type: number
     disk = 10
+
+    # SEP ID
+    # optional parameter
+    # type: number
+    sep_id = 1010
+
+    # SEP pool name
+    # optional parameter
+    # type: string
+    sep_pool = "data01"
   }
+
+  # ...any number of additional worker groups can be created after this
+  # labels, annotations and taints for subsequent groups are set directly in the workers block
+  workers {
+    # worker group name
+    # required parameter
+    # type: string
+    name = "additional_wg"
+
+    # number of nodes
+    # required parameter
+    # type: number
+    num = 2
+
+    # number of CPUs
+    # required parameter
+    # type: number
+    cpu = 2
+
+    # amount of RAM in MB
+    # required parameter
+    # type: number
+    ram = 4096
+
+    # disk size in GB
+    # required parameter
+    # type: number
+    disk = 10
+
+    # SEP ID
+    # optional parameter
+    # type: number
+    sep_id = 1010
+
+    # SEP pool name
+    # optional parameter
+    # type: string
+    sep_pool = "data01"
+
+    # list of labels
+    # optional parameter
+    # type: list of strings
+    labels = ["label1=value1", "label2=value2"]
+
+    # list of annotations
+    # optional parameter
+    # type: list of strings
+    annotations = ["key1=value1", "key2=value2"]
+
+    # list of taints
+    # optional parameter
+    # type: list of strings
+    taints = ["key1=value1:NoSchedule", "key2=value2:NoExecute"]
+  }
 }
```