Compare commits

...

8 Commits
4.0.0 ... 4.2.3

SHA1 Message Date
6365f63fc1 4.2.3 2023-06-29 16:02:38 +03:00
85ce76564f 4.2.2 2023-06-23 15:30:46 +03:00
928481d26f 4.2.1 2023-06-19 16:05:27 +03:00
0e64974821 4.2.0 2023-06-08 12:26:21 +03:00
371bb0d90f 4.1.1 2023-06-01 17:40:47 +03:00
caf7213bca 4.1.0 2023-05-26 17:12:03 +03:00
9cf150437d 4.0.2 2023-05-23 16:48:16 +03:00
523d96189f 4.0.1 2023-05-19 17:14:55 +03:00
153 changed files with 3130 additions and 517 deletions

2
.gitignore vendored
View File

@@ -2,4 +2,4 @@ decort/vendor/
examples/
url_scrapping/
terraform-provider-decort*
.vscode/
.vscode/

View File

@@ -1,30 +1,4 @@
## Version 4.0.0
## Version 4.2.3
### Features
- Updated provider to the newest DECORT version (3.8.6):
- resource_k8s now has "network_plugin" required field
- Added "start" argument to resource_k8s
- data_source_bservice, resource_bservice "computes" and "groups" fields now have more in-depth information
- resource_compute now has full boot disk information
- data_source_extnet now has additional field "e_burst" inside "default_qos" substructure
- data_source_rg_list, data_source_rg, resource_rg, resource_account and data_source_account now have two additional fields:
- cpu_allocation_parameter
- cpu_allocation_ratio
- unnecessary fields were removed from all disks data sources && resources (except for unattached disks), to be exact:
- boot_partition
- guid
- disk_path
- iqn
- login
- milestones
- password
- purge_attempts
- reality_device_number
- reference_id
- Removed automatic switch between cloudbroker/cloudapi (admin/user modes)
## Bug Fix
- Fixed resource_compute error when creating a new instance with EXTNET type network
- Fixed quota record marshalling in decort_resgroup resource
- Fixed GID validation for multiple resources
- Fixed possible 'addUser' error when importing resource_account with already added user
## Bug Fixes
- Made "ipcidr" field in decort_vins resource optional

View File

@@ -8,7 +8,7 @@ ZIPDIR = ./zip
BINARY=${NAME}.exe
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.0.0
VERSION=4.2.3
#OS_ARCH=darwin_amd64
OS_ARCH=windows_amd64
#OS_ARCH=linux_amd64

2
go.mod
View File

@@ -9,7 +9,7 @@ require (
github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
github.com/sirupsen/logrus v1.9.0
golang.org/x/net v0.5.0
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7
)
require (

4
go.sum
View File

@@ -332,5 +332,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0 h1:y8F3bzEb8mROEYnoG495AxpbSTEkfSpFh97sZe42cJE=
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.0/go.mod h1:YP57mpXh60xeRERVaCehn+l0S7Qe24trVll1EvrKzds=
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7 h1:1lHhvOsMX5iFQ4z2qmVT7cORbCr+hTeEH9Lk1E2liYE=
repository.basistech.ru/BASIS/decort-golang-sdk v1.4.7/go.mod h1:szsTGa73O75ckCWVGJPvTtRbhA/ubuYrYhMkPjvHlmE=

View File

@@ -158,7 +158,6 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
Username: ret_config.legacy_user,
Password: ret_config.legacy_password,
DecortURL: ret_config.controller_url,
Retries: 0,
SSLSkipVerify: allow_unverified_ssl,
}

View File

@@ -26,6 +26,7 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
@@ -63,6 +64,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
"decort_k8s_wg": k8s.DataSourceK8sWg(),
"decort_k8s_wg_list": k8s.DataSourceK8sWgList(),
"decort_k8s_computes": k8s.DataSourceK8sComputes(),
"decort_vins": vins.DataSourceVins(),
"decort_vins_list": vins.DataSourceVinsList(),
"decort_vins_audits": vins.DataSourceVinsAudits(),
@@ -118,6 +120,8 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_lb": lb.DataSourceLB(),
"decort_lb_list": lb.DataSourceLBList(),
"decort_lb_list_deleted": lb.DataSourceLBListDeleted(),
"decort_flipgroup": flipgroup.DataSourceFlipgroup(),
"decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(),
"decort_cb_account": cb_account.DataSourceAccount(),
"decort_cb_account_list": cb_account.DataSourceAccountList(),

View File

@@ -25,6 +25,7 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
@@ -57,6 +58,7 @@ func newResourcesMap() map[string]*schema.Resource {
"decort_pfw": pfw.ResourcePfw(),
"decort_k8s": k8s.ResourceK8s(),
"decort_k8s_wg": k8s.ResourceK8sWg(),
"decort_k8s_cp": k8s.ResourceK8sCP(),
"decort_snapshot": snapshot.ResourceSnapshot(),
"decort_account": account.ResourceAccount(),
"decort_bservice": bservice.ResourceBasicService(),
@@ -68,22 +70,23 @@ func newResourcesMap() map[string]*schema.Resource {
"decort_lb_backend_server": lb.ResourceLBBackendServer(),
"decort_lb_frontend": lb.ResourceLBFrontend(),
"decort_lb_frontend_bind": lb.ResourceLBFrontendBind(),
"decort_flipgroup": flipgroup.ResourceFlipgroup(),
"decort_cb_account": cb_account.ResourceAccount(),
"decort_cb_disk": cb_disks.ResourceDisk(),
"decort_cb_image": cb_image.ResourceImage(),
"decort_cb_virtual_image":cb_image.ResourceVirtualImage(),
"decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
"decort_cb_delete_images":cb_image.ResourceDeleteImages(),
"decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
"decort_cb_sep": cb_sep.ResourceSep(),
"decort_cb_sep_config": cb_sep.ResourceSepConfig(),
"decort_cb_resgroup": cb_rg.ResourceResgroup(),
"decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
"decort_cb_vins": cb_vins.ResourceVins(),
"decort_cb_pfw": cb_pfw.ResourcePfw(),
"decort_cb_k8s": cb_k8s.ResourceK8s(),
"decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
"decort_cb_snapshot": cb_snapshot.ResourceSnapshot(),
"decort_cb_account": cb_account.ResourceAccount(),
"decort_cb_disk": cb_disks.ResourceDisk(),
"decort_cb_image": cb_image.ResourceImage(),
"decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
"decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
"decort_cb_delete_images": cb_image.ResourceDeleteImages(),
"decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
"decort_cb_sep": cb_sep.ResourceSep(),
"decort_cb_sep_config": cb_sep.ResourceSepConfig(),
"decort_cb_resgroup": cb_rg.ResourceResgroup(),
"decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
"decort_cb_vins": cb_vins.ResourceVins(),
"decort_cb_pfw": cb_pfw.ResourcePfw(),
"decort_cb_k8s": cb_k8s.ResourceK8s(),
"decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
"decort_cb_snapshot": cb_snapshot.ResourceSnapshot(),
}
}

View File

@@ -0,0 +1,191 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package flipgroup
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenFlipgroup(d, fg)
d.SetId(fmt.Sprint(fg.ID))
return nil
}
func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"flipgroup_id": {
Type: schema.TypeInt,
Required: true,
Description: "Flipgroupd ID",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"client_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
},
"net_type": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceFlipgroup() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceFlipgroupRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout180s,
Default: &constants.Timeout180s,
},
Schema: dataSourceFlipgroupSchemaMake(),
}
}

View File

@@ -0,0 +1,210 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package flipgroup
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceFlipgroupList(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
fg_list, err := utilityFlipgroupListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenFlipgroupList(fg_list))
return nil
}
func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"flipgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"client_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
},
"net_type": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func DataSourceFlipGroupList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceFlipgroupList,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout180s,
Default: &constants.Timeout180s,
},
Schema: dataSourceFlipgroupListSchemaMake(),
}
}

View File

@@ -0,0 +1,107 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package flipgroup
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/flipgroup"
)
func flattenFlipgroup(d *schema.ResourceData, fg *flipgroup.ItemFLIPGroup) {
d.Set("account_id", fg.AccountID)
d.Set("account_name", fg.AccountName)
d.Set("client_ids", fg.ClientIDs)
d.Set("client_names", fg.ClientNames)
d.Set("client_type", fg.ClientType)
d.Set("conn_id", fg.ConnID)
d.Set("conn_type", fg.ConnType)
d.Set("created_by", fg.CreatedBy)
d.Set("created_time", fg.CreatedTime)
d.Set("default_gw", fg.DefaultGW)
d.Set("deleted_by", fg.DeletedBy)
d.Set("deleted_time", fg.DeletedTime)
d.Set("desc", fg.Description)
d.Set("gid", fg.GID)
d.Set("guid", fg.GUID)
d.Set("flipgroup_id", fg.ID)
d.Set("ip", fg.IP)
d.Set("milestones", fg.Milestones)
d.Set("name", fg.Name)
d.Set("net_id", fg.NetID)
d.Set("net_type", fg.NetType)
d.Set("network", fg.Network)
d.Set("rg_id", fg.RGID)
d.Set("rg_name", fg.RGName)
d.Set("status", fg.Status)
d.Set("updated_by", fg.UpdatedBy)
d.Set("updated_time", fg.UpdatedTime)
}
func flattenFlipgroupList(fg_list flipgroup.ListFLIPGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, fg := range fg_list {
temp := map[string]interface{}{
"account_id": fg.AccountID,
"account_name": fg.AccountName,
"client_ids": fg.ClientIDs,
"client_names": fg.ClientNames,
"client_type": fg.ClientType,
"conn_id": fg.ConnID,
"conn_type": fg.ConnType,
"created_by": fg.CreatedBy,
"created_time": fg.CreatedTime,
"default_gw": fg.DefaultGW,
"deleted_by": fg.DeletedBy,
"deleted_time": fg.DeletedTime,
"desc": fg.Description,
"gid": fg.GID,
"guid": fg.GUID,
"flipgroup_id": fg.ID,
"ip": fg.IP,
"milestones": fg.Milestones,
"name": fg.Name,
"net_id": fg.NetID,
"net_type": fg.NetType,
"network": fg.Network,
"rg_id": fg.RGID,
"rg_name": fg.RGName,
"status": fg.Status,
"updated_by": fg.UpdatedBy,
"updated_time": fg.UpdatedTime,
}
res = append(res, temp)
}
return res
}

View File

@@ -0,0 +1,309 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package flipgroup
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
log "github.com/sirupsen/logrus"
)
func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int))
c := m.(*controller.ControllerCfg)
req := flipgroup.CreateRequest{
AccountID: uint64(d.Get("account_id").(int)),
Name: d.Get("name").(string),
NetType: d.Get("net_type").(string),
NetID: uint64(d.Get("net_id").(int)),
ClientType: d.Get("client_type").(string),
IP: d.Get("ip").(string),
Description: d.Get("desc").(string),
}
resp, err := c.CloudAPI().FLIPGroup().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(fmt.Sprint(resp.ID))
var warnings dc.Warnings
if client_ids, ok := d.GetOk("client_ids"); ok {
casted := client_ids.([]interface{})
addComputesAfterCreation(ctx, &warnings, c, casted, resp.ID)
}
defer resourceFlipgroupRead(ctx, d, m)
return warnings.Get()
}
func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenFlipgroup(d, fg)
return nil
}
func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int))
c := m.(*controller.ControllerCfg)
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
var warnings dc.Warnings
basicUpdate := false
req := flipgroup.EditRequest{FLIPGroupID: fg.ID}
if d.HasChange("desc") {
req.Description = d.Get("desc").(string)
basicUpdate = true
}
if d.HasChange("name") {
req.Name = d.Get("name").(string)
basicUpdate = true
}
if basicUpdate {
_, err := c.CloudAPI().FLIPGroup().Edit(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("client_ids") {
handleClientIDsUpdate(ctx, d, c, fg, &warnings)
}
defer resourceFlipgroupRead(ctx, d, m)
return warnings.Get()
}
func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceFlipgroupDelete called with id: %v", d.Get("flipgroup_id").(int))
c := m.(*controller.ControllerCfg)
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
req := flipgroup.DeleteRequest{
FLIPGroupID: fg.ID,
}
_, err = c.CloudAPI().FLIPGroup().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "Account ID",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Flipgroup name",
},
"net_id": {
Type: schema.TypeInt,
Required: true,
Description: "EXTNET or ViNS ID",
},
"net_type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, true),
Description: "Network type, EXTNET or VINS",
},
"client_type": {
Type: schema.TypeString,
Required: true,
Description: "Type of client, 'compute' ('vins' will be later)",
ValidateFunc: validation.StringInSlice([]string{"compute"}, true),
},
"ip": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "IP address to associate with this group. If empty, the platform will autoselect IP address",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Text description of this Flipgroup instance",
},
"client_ids": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of clients attached to this Flipgroup instance",
},
"flipgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func ResourceFlipgroup() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
CreateContext: resourceFlipgroupCreate,
ReadContext: resourceFlipgroupRead,
UpdateContext: resourceFlipgroupUpdate,
DeleteContext: resourceFlipgroupDelete,
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout300s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: resourceFlipgroupSchemaMake(),
}
}

View File

@@ -0,0 +1,167 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package flipgroup
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
log "github.com/sirupsen/logrus"
)
func handleClientIDsUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, fg *flipgroup.ItemFLIPGroup, warn *dc.Warnings) {
addedClients := make([]interface{}, 0)
removedClients := make([]interface{}, 0)
old_set, new_set := d.GetChange("client_ids")
oldSlice := old_set.([]interface{})
newSlice := new_set.([]interface{})
for _, oldElem := range oldSlice {
if !containsClient(newSlice, oldElem) {
removedClients = append(removedClients, oldElem)
}
}
for _, newElem := range newSlice {
if !containsClient(oldSlice, newElem) {
addedClients = append(addedClients, newElem)
}
}
log.Debugf("Found client_ids change with %v deletion(s) and %v addition(s) [flipgroupID=%v]", len(removedClients), len(addedClients), fg.ID)
if len(addedClients) > 0 {
for _, id := range addedClients {
req := flipgroup.ComputeAddRequest{
FLIPGroupID: fg.ID,
ComputeID: uint64(id.(int)),
}
if _, err := c.CloudAPI().FLIPGroup().ComputeAdd(ctx, req); err != nil {
warn.Add(err)
}
}
}
if len(removedClients) > 0 {
for _, id := range removedClients {
req := flipgroup.ComputeRemoveRequest{
FLIPGroupID: fg.ID,
ComputeID: uint64(id.(int)),
}
if _, err := c.CloudAPI().FLIPGroup().ComputeRemove(ctx, req); err != nil {
warn.Add(err)
}
}
}
}
func containsClient(set []interface{}, check interface{}) bool {
for _, elem := range set {
elemConv := elem.(int)
checkConv := check.(int)
if elemConv == checkConv {
return true
}
}
return false
}
func addComputesAfterCreation(ctx context.Context, warnings *dc.Warnings, c *controller.ControllerCfg, compute_ids []interface{}, flipgroupID uint64) {
if len(compute_ids) == 0 {
return
}
log.Debugf("Adding %v computes to flipgroup [id=%v]...", len(compute_ids), flipgroupID)
for _, elem := range compute_ids {
compute_id := uint64(elem.(int))
req := flipgroup.ComputeAddRequest{
FLIPGroupID: flipgroupID,
ComputeID: compute_id,
}
_, err := c.CloudAPI().FLIPGroup().ComputeAdd(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.ItemFLIPGroup, error) {
c := m.(*controller.ControllerCfg)
req := flipgroup.GetRequest{}
if d.Id() == "" {
req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int))
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.FLIPGroupID = id
}
fg, err := c.CloudAPI().FLIPGroup().Get(ctx, req)
if err != nil {
return nil, err
}
return fg, err
}
func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (flipgroup.ListFLIPGroups, error) {
c := m.(*controller.ControllerCfg)
req := flipgroup.ListRequest{}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
fg_list, err := c.CloudAPI().FLIPGroup().List(ctx, req)
if err != nil {
return nil, err
}
return fg_list, err
}

View File

@@ -0,0 +1,123 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceK8sComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(fmt.Sprint(cluster.ID))
flattenK8sDataComputes(d, cluster)
return nil
}
func computesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"id": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"group_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
}
}
func workerComputesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{}
}
func dataSourceK8sComputesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"k8s_id": {
Type: schema.TypeInt,
Required: true,
},
"masters": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computesSchemaMake(),
},
},
"workers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computesSchemaMake(),
},
},
}
}
func DataSourceK8sComputes() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceK8sComputesRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: dataSourceK8sComputesSchemaMake(),
}
}

View File

@@ -33,11 +33,53 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
package k8s
import (
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
)
func flattenK8sDataComputes(d *schema.ResourceData, cluster *k8s.RecordK8S) {
d.Set("k8s_id", cluster.ID)
d.Set("masters", flattenMasterComputes(cluster))
d.Set("workers", flattenWorkerComputes(cluster))
}
func flattenMasterComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, comp := range cluster.K8SGroups.Masters.DetailedInfo {
temp := map[string]interface{}{
"id": comp.ID,
"name": comp.Name,
"status": comp.Status,
"tech_status": comp.TechStatus,
"group_name": cluster.K8SGroups.Masters.Name,
}
res = append(res, temp)
}
return res
}
func flattenWorkerComputes(cluster *k8s.RecordK8S) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, wg := range cluster.K8SGroups.Workers {
for _, comp := range wg.DetailedInfo {
temp := map[string]interface{}{
"id": comp.ID,
"name": comp.Name,
"status": comp.Status,
"tech_status": comp.TechStatus,
"group_name": wg.Name,
}
res = append(res, temp)
}
}
return res
}
func flattenAclList(aclList k8s.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, acl := range aclList {
@@ -128,6 +170,14 @@ func flattenMasterGroup(mastersGroup k8s.MasterGroup, masters []compute.RecordCo
func flattenK8sGroup(k8SGroupList k8s.ListK8SGroups, workers []compute.RecordCompute) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, k8sGroup := range k8SGroupList {
labels := make([]string, 0)
for _, label := range k8sGroup.Labels {
if strings.HasPrefix(label, "workersGroupName") {
continue
}
labels = append(labels, label)
}
temp := map[string]interface{}{
"annotations": k8sGroup.Annotations,
"cpu": k8sGroup.CPU,
@@ -135,7 +185,7 @@ func flattenK8sGroup(k8SGroupList k8s.ListK8SGroups, workers []compute.RecordCom
"disk": k8sGroup.Disk,
"guid": k8sGroup.GUID,
"id": k8sGroup.ID,
"labels": k8sGroup.Labels,
"labels": labels,
"name": k8sGroup.Name,
"num": k8sGroup.Num,
"ram": k8sGroup.RAM,
@@ -253,6 +303,43 @@ func flattenK8sList(d *schema.ResourceData, k8sItems k8s.ListK8SClusters) {
d.Set("items", flattenK8sItems(k8sItems))
}
func flattenResourceK8sCP(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute) {
d.Set("acl", flattenAcl(k8s.ACL))
d.Set("account_id", k8s.AccountID)
d.Set("account_name", k8s.AccountName)
d.Set("k8sci_id", k8s.CIID)
d.Set("bservice_id", k8s.BServiceID)
d.Set("created_by", k8s.CreatedBy)
d.Set("created_time", k8s.CreatedTime)
d.Set("deleted_by", k8s.DeletedBy)
d.Set("deleted_time", k8s.DeletedTime)
d.Set("k8s_ci_name", k8s.K8CIName)
d.Set("with_lb", k8s.LBID != 0)
d.Set("lb_id", k8s.LBID)
d.Set("k8s_id", k8s.ID)
d.Set("name", k8s.Name)
d.Set("rg_id", k8s.RGID)
d.Set("rg_name", k8s.RGName)
d.Set("status", k8s.Status)
d.Set("tech_status", k8s.TechStatus)
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
d.Set("network_plugin", k8s.NetworkPlugin)
flattenCPParams(d, k8s.K8SGroups.Masters, masters)
}
func flattenCPParams(d *schema.ResourceData, mastersGroup k8s.MasterGroup, masters []compute.RecordCompute) {
d.Set("cpu", mastersGroup.CPU)
d.Set("detailed_info", flattenDetailedInfo(mastersGroup.DetailedInfo, masters))
d.Set("disk", mastersGroup.Disk)
d.Set("master_id", mastersGroup.ID)
d.Set("master_name", mastersGroup.Name)
d.Set("num", mastersGroup.Num)
d.Set("ram", mastersGroup.RAM)
d.Set("master_id", mastersGroup.ID)
}
func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []compute.RecordCompute, workers []compute.RecordCompute) {
wg_name := k8s.K8SGroups.Workers[0].Name
@@ -283,12 +370,21 @@ func flattenResourceK8s(d *schema.ResourceData, k8s k8s.RecordK8S, masters []com
}
func flattenWg(d *schema.ResourceData, wg k8s.ItemK8SGroup, computes []compute.RecordCompute) {
labels := make([]string, 0)
for _, label := range wg.Labels {
if strings.HasPrefix(label, "workersGroupName") {
continue
}
labels = append(labels, label)
}
d.Set("annotations", wg.Annotations)
d.Set("cpu", wg.CPU)
d.Set("detailed_info", flattenDetailedInfo(wg.DetailedInfo, computes))
d.Set("disk", wg.Disk)
d.Set("guid", wg.GUID)
d.Set("labels", wg.Labels)
d.Set("labels", labels)
d.Set("name", wg.Name)
d.Set("num", wg.Num)
d.Set("ram", wg.RAM)

View File

@@ -39,12 +39,16 @@ import (
)
type K8sNodeRecord struct {
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
ID int `json:"id"`
Name string `json:"name"`
Disk int `json:"disk"`
Cpu int `json:"cpu"`
Num int `json:"num"`
Ram int `json:"ram"`
// coming in future updates (curr. version 4.0.2)
// Labels []interface{} `json:"labels"`
// Annotations []interface{} `json:"annotations"`
// Taints []interface{} `json:"taints"`
DetailedInfo []struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -53,7 +57,7 @@ type K8sNodeRecord struct {
SepPool string `json:"SepPool"`
}
//K8sRecord represents k8s instance
// K8sRecord represents k8s instance
type K8sRecord struct {
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
@@ -72,7 +76,7 @@ type K8sRecord struct {
type K8sRecordList []K8sRecord
//LbRecord represents load balancer instance
// LbRecord represents load balancer instance
type LbRecord struct {
ID int `json:"id"`
Name string `json:"name"`
@@ -87,7 +91,7 @@ type LbRecord struct {
} `json:"primaryNode"`
}
//Blasphemous workaround for parsing Result value
// Blasphemous workaround for parsing Result value
type TaskResult int
func (r *TaskResult) UnmarshalJSON(b []byte) error {
@@ -117,7 +121,7 @@ func (r *TaskResult) UnmarshalJSON(b []byte) error {
return nil
}
//AsyncTask represents a long task completion status
// AsyncTask represents a long task completion status
type AsyncTask struct {
AuditID string `json:"auditId"`
Completed bool `json:"completed"`
@@ -136,7 +140,7 @@ type SshKeyConfig struct {
UserShell string
}
//FromSDK
// FromSDK
type K8SGroup struct {
Annotations []string `json:"annotations"`
CPU uint64 `json:"cpu"`

View File

@@ -32,7 +32,10 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
package k8s
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
func nodeMasterDefault() K8sNodeRecord {
return K8sNodeRecord{
@@ -52,7 +55,7 @@ func nodeWorkerDefault() K8sNodeRecord {
}
}
func parseNode(nodeList []interface{}) K8sNodeRecord {
func parseDefaultNode(nodeList []interface{}) K8sNodeRecord {
node := nodeList[0].(map[string]interface{})
return K8sNodeRecord{
@@ -65,51 +68,13 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
}
}
func nodeToResource(node K8sNodeRecord) []interface{} {
mp := make(map[string]interface{})
mp["num"] = node.Num
mp["cpu"] = node.Cpu
mp["ram"] = node.Ram
mp["disk"] = node.Disk
return []interface{}{mp}
}
func nodeK8sSubresourceSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"num": {
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
},
"cpu": {
Type: schema.TypeInt,
Required: true,
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
Description: "Node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Required: true,
Description: "Node boot disk size in GB.",
},
}
}
func mastersSchemaMake() map[string]*schema.Schema {
masters := masterGroupSchemaMake()
masters["num"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntInSlice([]int{1, 3}),
Description: "Number of nodes to create. Can be either 1 or 3",
}
masters["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
@@ -137,41 +102,78 @@ func mastersSchemaMake() map[string]*schema.Schema {
ForceNew: true,
Description: "Node boot disk size in GB.",
}
return masters
}
func workersSchemaMake() map[string]*schema.Schema {
workers := k8sGroupListSchemaMake()
workers["num"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "Number of nodes to create.",
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"num": {
Type: schema.TypeInt,
Required: true,
},
"ram": {
Type: schema.TypeInt,
Required: true,
},
"cpu": {
Type: schema.TypeInt,
Required: true,
},
"disk": {
Type: schema.TypeInt,
Required: true,
},
"annotations": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"detailed_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: detailedInfoSchemaMake(),
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"labels": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
},
"sep_pool": {
Type: schema.TypeString,
Optional: true,
},
}
workers["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
workers["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
workers["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node CPU count.",
}
workers["ram"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node RAM in MB.",
}
workers["disk"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Node boot disk size in GB.",
}
return workers
}

View File

@@ -95,7 +95,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var masterNode K8sNodeRecord
if masters, ok := d.GetOk("masters"); ok {
masterNode = parseNode(masters.([]interface{}))
masterNode = parseDefaultNode(masters.([]interface{}))
} else {
masterNode = nodeMasterDefault()
}
@@ -108,7 +108,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok {
workerNode = parseNode(workers.([]interface{}))
workerNode = parseDefaultNode(workers.([]interface{}))
} else {
workerNode = nodeWorkerDefault()
}
@@ -123,7 +123,9 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
if labels, ok := d.GetOk("labels"); ok {
labels := labels.([]interface{})
for _, label := range labels {
createReq.Labels = append(createReq.Labels, label.(string))
if !strings.HasPrefix(label.(string), "workersGroupName") {
createReq.Labels = append(createReq.Labels, label.(string))
}
}
}
@@ -141,11 +143,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
}
if withLB, ok := d.GetOk("with_lb"); ok {
createReq.WithLB = withLB.(bool)
} else {
createReq.WithLB = true
}
createReq.WithLB = d.Get("with_lb").(bool)
if extNet, ok := d.GetOk("extnet_id"); ok {
createReq.ExtNetID = uint64(extNet.(int))
@@ -217,7 +215,7 @@ func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{})
return diag.FromErr(err)
}
enableReq := k8s.DisabelEnableRequest{
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
@@ -374,7 +372,7 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
return diag.FromErr(err)
}
enableReq := k8s.DisabelEnableRequest{
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
@@ -408,44 +406,30 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
}
}
if d.HasChange("name") {
req := k8s.UpdateRequest{
K8SID: cluster.ID,
Name: d.Get("name").(string),
}
updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
doBasicUpdate := false
_, err := c.CloudAPI().K8S().Update(ctx, req)
if d.HasChange("name") {
updateReq.Name = d.Get("name").(string)
doBasicUpdate = true
}
if d.HasChange("desc") {
updateReq.Description = d.Get("desc").(string)
doBasicUpdate = true
}
if doBasicUpdate {
_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("workers") {
wg := cluster.K8SGroups.Workers[0]
newWorkers := parseNode(d.Get("workers").([]interface{}))
if uint64(newWorkers.Num) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
Num: uint64(newWorkers.Num - int(wg.Num)),
}
if _, err := c.CloudAPI().K8S().WorkerAdd(ctx, req); err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newWorkers.Num; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: wg.ID,
WorkerID: wg.DetailedInfo[i].ID,
}
if _, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req); err != nil {
return diag.FromErr(err)
}
}
err := handleWorkersChange(ctx, d, c, cluster)
if err != nil {
return diag.FromErr(err)
}
}
@@ -564,7 +548,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: workersSchemaMake(),
},
@@ -696,10 +679,10 @@ func ResourceK8s() *schema.Resource {
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout30m,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
Read: &constants.Timeout600s,
Update: &constants.Timeout600s,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceK8sSchemaMake(),

View File

@@ -0,0 +1,711 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package k8s
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneCreate: called with name %s, rg %d", d.Get("name").(string), d.Get("rg_id").(int))
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveK8sciID, err := existK8sCIID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveK8sciID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
}
if _, ok := d.GetOk("extnet_id"); ok {
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
}
c := m.(*controller.ControllerCfg)
createReq := k8s.CreateRequest{}
createReq.Name = d.Get("name").(string)
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.K8SCIID = uint64(d.Get("k8sci_id").(int))
createReq.WorkerGroupName = "temp"
createReq.NetworkPlugin = d.Get("network_plugin").(string)
if num, ok := d.GetOk("num"); ok {
createReq.MasterNum = uint(num.(int))
} else {
createReq.MasterNum = 1
}
if cpu, ok := d.GetOk("cpu"); ok {
createReq.MasterCPU = uint(cpu.(int))
} else {
createReq.MasterCPU = 2
}
if ram, ok := d.GetOk("ram"); ok {
createReq.MasterRAM = uint(ram.(int))
} else {
createReq.MasterRAM = 2048
}
if disk, ok := d.GetOk("disk"); ok {
createReq.MasterDisk = uint(disk.(int))
} else {
createReq.MasterDisk = 0
}
if sepId, ok := d.GetOk("sep_id"); ok {
createReq.MasterSEPID = uint64(sepId.(int))
}
if sepPool, ok := d.GetOk("sep_pool"); ok {
createReq.MasterSEPPool = sepPool.(string)
}
createReq.WithLB = d.Get("with_lb").(bool)
if extNet, ok := d.GetOk("extnet_id"); ok {
createReq.ExtNetID = uint64(extNet.(int))
} else {
createReq.ExtNetID = 0
}
if desc, ok := d.GetOk("desc"); ok {
createReq.Description = desc.(string)
}
resp, err := c.CloudAPI().K8S().Create(ctx, createReq)
if err != nil {
return diag.FromErr(err)
}
taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
for {
task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceK8sControlPlaneCreate: instance creating - %s", task.Stage)
if task.Completed {
if task.Error != "" {
return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
}
d.SetId(strconv.Itoa(int(task.Result)))
break
}
time.Sleep(time.Second * 10)
}
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
delWGReq := k8s.WorkersGroupDeleteRequest{
K8SID: cluster.ID,
WorkersGroupID: cluster.K8SGroups.Workers[0].ID,
}
_, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, delWGReq)
if err != nil {
return diag.FromErr(err)
}
return resourceK8sCPRead(ctx, d, m)
}
func resourceK8sCPRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
hasChanged := false
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCreate(ctx, d, m)
case status.Enabling:
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
}
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
k8sList, err := utilityK8sListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
curK8s := k8s.ItemK8SCluster{}
for _, k8sCluster := range k8sList {
if k8sCluster.ID == cluster.ID {
curK8s = k8sCluster
}
}
if curK8s.ID == 0 {
return diag.Errorf("Cluster with id %d not found", cluster.ID)
}
d.Set("vins_id", curK8s.VINSID)
masterComputeList := make([]compute.RecordCompute, 0, len(cluster.K8SGroups.Masters.DetailedInfo))
for _, masterNode := range cluster.K8SGroups.Masters.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, masterNode.ID)
if err != nil {
return diag.FromErr(err)
}
masterComputeList = append(masterComputeList, *compute)
}
var warnings dc.Warnings
if _, ok := d.GetOk("k8s_id"); !ok {
for _, worker := range cluster.K8SGroups.Workers {
err := fmt.Errorf("Found worker-group with ID %d. Make sure to import it to decort_k8s_wg resource if you wish to manage it", worker.ID)
warnings.Add(err)
}
}
flattenResourceK8sCP(d, *cluster, masterComputeList)
lbGetReq := lb.GetRequest{
LBID: cluster.LBID,
}
lb, err := c.CloudAPI().LB().Get(ctx, lbGetReq)
if err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
kubeconfigReq := k8s.GetConfigRequest{
K8SID: cluster.ID,
}
kubeconfig, err := c.CloudAPI().K8S().GetConfig(ctx, kubeconfigReq)
if err != nil {
log.Warnf("could not get kubeconfig: %v", err)
}
d.Set("kubeconfig", kubeconfig)
return warnings.Get()
}
func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneUpdate: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveK8sciID, err := existK8sCIID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveK8sciID {
return diag.Errorf("resourceK8sUpdate: can't update k8s cluster because K8sCIID %d is not allowed or does not exist", d.Get("k8sci_id").(int))
}
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
hasChanged := false
switch cluster.Status {
case status.Modeled:
return diag.Errorf("The k8s cluster is in status: %s, please, contact support for more information", cluster.Status)
case status.Creating:
case status.Created:
case status.Deleting:
case status.Deleted:
id, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := k8s.RestoreRequest{
K8SID: id,
}
_, err := c.CloudAPI().K8S().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
enableReq := k8s.DisableEnableRequest{
K8SID: id,
}
_, err = c.CloudAPI().K8S().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Destroying:
return diag.Errorf("The k8s cluster is in progress with status: %s", cluster.Status)
case status.Destroyed:
d.SetId("")
return resourceK8sCreate(ctx, d, m)
case status.Enabling:
case status.Enabled:
case status.Disabling:
case status.Disabled:
log.Debugf("The k8s cluster is in status: %s, troubles may occur with update. Please, enable compute first.", cluster.Status)
case status.Restoring:
}
if hasChanged {
cluster, err = utilityK8sCheckPresence(ctx, d, m)
if cluster == nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
}
}
updateReq := k8s.UpdateRequest{K8SID: cluster.ID}
doBasicUpdate := false
if d.HasChange("name") {
updateReq.Name = d.Get("name").(string)
doBasicUpdate = true
}
if d.HasChange("desc") {
updateReq.Description = d.Get("desc").(string)
doBasicUpdate = true
}
if doBasicUpdate {
_, err := c.CloudAPI().K8S().Update(ctx, updateReq)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("start") {
if d.Get("start").(bool) {
if cluster.TechStatus == "STOPPED" {
req := k8s.StartRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
} else {
if cluster.TechStatus == "STARTED" {
req := k8s.StopRequest{
K8SID: cluster.ID,
}
_, err := c.CloudAPI().K8S().Stop(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
if d.HasChange("num") {
oldVal, newVal := d.GetChange("num")
if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
}
req := k8s.DeleteMasterFromGroupRequest{
K8SID: cluster.ID,
MasterGroupID: cluster.K8SGroups.Masters.ID,
MasterIDs: ids,
}
_, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
return nil
}
func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sControlPlaneDelete: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
cluster, err := utilityK8sCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := k8s.DeleteRequest{
K8SID: cluster.ID,
Permanently: true,
}
_, err = c.CloudAPI().K8S().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func resourceK8sCPSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of the cluster.",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Resource group ID that this instance belongs to.",
},
"k8sci_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.",
},
"network_plugin": {
Type: schema.TypeString,
Required: true,
Description: "Network plugin to be used",
ValidateFunc: validation.StringInSlice([]string{"flannel", "weavenet", "calico"}, true),
},
"num": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntInSlice([]int{1, 3}),
Description: "Number of VMs to create. Can be either 1 or 3",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Node boot disk size in GB.",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Storage Endpoint ID",
},
"sep_pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Storage Endpoint Pool",
},
"with_lb": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Create k8s with load balancer if true.",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "Text description of this instance.",
},
"start": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Start k8s cluster.",
},
"detailed_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: detailedInfoSchemaMake(),
},
},
"master_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Master group ID.",
},
"master_name": {
Type: schema.TypeString,
Computed: true,
Description: "Master group name.",
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: aclGroupSchemaMake(),
},
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"bservice_id": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"k8s_ci_name": {
Type: schema.TypeString,
Computed: true,
},
"lb_id": {
Type: schema.TypeInt,
Computed: true,
},
"k8s_id": {
Type: schema.TypeInt,
Computed: true,
},
"lb_ip": {
Type: schema.TypeString,
Computed: true,
Description: "IP address of default load balancer.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"kubeconfig": {
Type: schema.TypeString,
Computed: true,
Description: "Kubeconfig for cluster access.",
},
"vins_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of default vins for this instace.",
},
}
}
func ResourceK8sCP() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceK8sCPCreate,
ReadContext: resourceK8sCPRead,
UpdateContext: resourceK8sCPUpdate,
DeleteContext: resourceK8sCPDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout30m,
Read: &constants.Timeout600s,
Update: &constants.Timeout600s,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceK8sCPSchemaMake(),
}
}
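A minimal usage sketch for the control-plane resource defined above. The resource type name decort_k8s_cp and all IDs and values below are assumptions (the provider registration is not shown in this diff); field names follow resourceK8sCPSchemaMake.
resource "decort_k8s_cp" "cluster" {
  # Required fields per the schema above
  name           = "k8s-cp-example"
  rg_id          = 999
  k8sci_id       = 999
  network_plugin = "flannel" # one of: flannel, weavenet, calico
  # Optional sizing: 1 or 3 master VMs
  num  = 1
  cpu  = 2
  ram  = 2048
  disk = 10
  with_lb = true
  start   = true
}
output "kubeconfig" {
  value = decort_k8s_cp.cluster.kubeconfig
}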

View File

@@ -55,16 +55,35 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
}
if !haveK8sID {
return diag.Errorf("resourceK8sCreate: can't create k8s cluster because K8sID %d is not allowed or does not exist", d.Get("k8s_id").(int))
return diag.Errorf("resourceK8sWgCreate: can't create k8s cluster because K8sID %d is not allowed or does not exist", d.Get("k8s_id").(int))
}
c := m.(*controller.ControllerCfg)
req := k8s.WorkersGroupAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
}
labels, _ := d.Get("labels").([]interface{})
for _, label := range labels {
if !strings.HasPrefix(label.(string), "workersGroupName") {
req.Labels = append(req.Labels, label.(string))
}
}
annotations, _ := d.Get("annotations").([]interface{})
for _, annotation := range annotations {
req.Annotations = append(req.Annotations, annotation.(string))
}
taints, _ := d.Get("taints").([]interface{})
for _, taint := range taints {
req.Taints = append(req.Taints, taint.(string))
}
if d.Get("disk") == nil {
@@ -111,6 +130,7 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
} else {
d.Set("k8s_id", d.Get("k8s_id"))
}
d.SetId(strings.Split(d.Id(), "#")[0])
flattenWg(d, *wg, workersComputeList)
@@ -234,6 +254,38 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
"labels": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"annotations": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"worker_sep_id": {
Type: schema.TypeInt,
Optional: true,
},
"worker_sep_pool": {
Type: schema.TypeString,
Optional: true,
},
"wg_id": {
Type: schema.TypeInt,
Computed: true,
@@ -246,31 +298,10 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Schema: detailedInfoSchemaMake(),
},
},
"labels": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"annotations": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
}
}
@@ -288,11 +319,11 @@ func ResourceK8sWg() *schema.Resource {
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
Create: &constants.Timeout20m,
Read: &constants.Timeout20m,
Update: &constants.Timeout20m,
Delete: &constants.Timeout20m,
Default: &constants.Timeout20m,
},
Schema: resourceK8sWgSchemaMake(),
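A hedged example of a decort_k8s_wg worker group using the fields added in this changeset (worker_sep_id, worker_sep_pool, labels, annotations, taints); the IDs and values below are placeholders, not values taken from the source.
resource "decort_k8s_wg" "wg" {
  # ID of the parent k8s cluster
  k8s_id = 999
  name   = "workers-1"
  # Worker sizing
  num  = 2
  cpu  = 2
  ram  = 2048
  disk = 10
  # Added in this changeset: SEP placement and node metadata
  worker_sep_id   = 1
  worker_sep_pool = "data01"
  labels          = ["env=test"]
  annotations     = ["project=demo"]
  taints          = ["dedicated=workers:NoSchedule"]
}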

View File

@@ -35,6 +35,7 @@ package k8s
import (
"context"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
@@ -42,10 +43,145 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func handleWorkersChange(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, cluster *k8s.RecordK8S) error {
o, n := d.GetChange("workers")
old_set, _ := o.([]interface{})
new_set, _ := n.([]interface{})
old_len := len(old_set)
new_len := len(new_set)
if old_len > new_len {
deleted := workersDifference(old_set, new_set)
if err := deleteWGs(ctx, c, cluster, deleted); err != nil {
return err
}
} else if old_len < new_len {
added := workersDifference(old_set, new_set)
if err := addWGs(ctx, c, cluster, added); err != nil {
return err
}
}
if err := updateNum(ctx, c, cluster, old_set, new_set); err != nil {
return err
}
return nil
}
func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, old_set []interface{}, new_set []interface{}) error {
for _, valOld := range old_set {
wgOld, _ := valOld.(map[string]interface{})
for _, valNew := range new_set {
wgNew, _ := valNew.(map[string]interface{})
if wgOld["id"] == wgNew["id"] {
oldNum := wgOld["num"].(int)
newNum := wgNew["num"].(int)
if oldNum < newNum {
req := k8s.WorkerAddRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
Num: uint64(newNum - oldNum),
}
_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
if err != nil {
return err
}
} else if oldNum > newNum {
for i := oldNum - 1; i >= newNum; i-- {
detailedInfo := wgOld["detailed_info"].([]interface{})
if len(detailedInfo) == 0 {
return nil
}
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
WorkerID: uint64(detailedInfo[i].(map[string]interface{})["compute_id"].(int)),
}
_, err := c.CloudAPI().K8S().DeleteWorkerFromGroup(ctx, req)
if err != nil {
return err
}
}
}
}
}
}
return nil
}
func deleteWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, deleted []interface{}) error {
for _, elem := range deleted {
found_wg := elem.(map[string]interface{})
req := k8s.WorkersGroupDeleteRequest{
K8SID: cluster.ID,
WorkersGroupID: uint64(found_wg["id"].(int)),
}
_, err := c.CloudAPI().K8S().WorkersGroupDelete(ctx, req)
if err != nil {
return err
}
}
return nil
}
func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.RecordK8S, added []interface{}) error {
for _, elem := range added {
found_wg := elem.(map[string]interface{})
req := k8s.WorkersGroupAddRequest{
K8SID: cluster.ID,
Name: found_wg["name"].(string),
WorkerSEPID: uint64(found_wg["sep_id"].(int)),
WorkerSEPPool: found_wg["sep_pool"].(string),
WorkerNum: uint64(found_wg["num"].(int)),
WorkerCPU: uint64(found_wg["cpu"].(int)),
WorkerRAM: uint64(found_wg["ram"].(int)),
WorkerDisk: uint64(found_wg["disk"].(int)),
}
labels, _ := found_wg["labels"].([]interface{})
for _, label := range labels {
if !strings.HasPrefix(label.(string), "workersGroupName") {
req.Labels = append(req.Labels, label.(string))
}
}
annotations, _ := found_wg["annotations"].([]interface{})
for _, annotation := range annotations {
req.Annotations = append(req.Annotations, annotation.(string))
}
taints, _ := found_wg["taints"].([]interface{})
for _, taint := range taints {
req.Taints = append(req.Taints, taint.(string))
}
_, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
k8sID, _ := strconv.ParseUint(d.Id(), 10, 64)
var k8sID uint64
if d.Id() != "" {
k8sID, _ = strconv.ParseUint(d.Id(), 10, 64)
} else {
k8sID = uint64(d.Get("k8s_id").(int))
}
req := k8s.GetRequest{
K8SID: k8sID,
}
@@ -112,3 +248,29 @@ func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceD
return k8sList, nil
}
func workersDifference(slice1 []interface{}, slice2 []interface{}) []interface{} {
var diff []interface{}
for i := 0; i < 2; i++ {
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1.(map[string]interface{})["id"] == s2.(map[string]interface{})["id"] {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
if i == 0 {
slice1, slice2 = slice2, slice1
}
}
return diff
}
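For reference, a sketch of the inline workers set that handleWorkersChange above reconciles. The resource type decort_k8s, the top-level arguments, and the assumption that workers is a configurable block are not confirmed by this diff; the worker keys mirror those read in addWGs and updateNum (name, num, cpu, ram, disk, sep_id, sep_pool, labels, annotations, taints), and all values are illustrative only.
resource "decort_k8s" "cluster" {
  name           = "k8s-example"
  rg_id          = 999
  k8sci_id       = 999
  network_plugin = "flannel"
  workers {
    name     = "workers-1"
    num      = 2 # scaled up/down via WorkerAdd / DeleteWorkerFromGroup
    cpu      = 2
    ram      = 2048
    disk     = 10
    sep_id   = 1
    sep_pool = "data01"
    labels   = ["env=test"]
  }
}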

View File

@@ -47,9 +47,16 @@ func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m in
return diag.FromErr(err)
}
result := computeList
if d.Get("ignore_k8s").(bool) {
// matches automatically generated names like "s234-g2134-c1" etc
result = matchComputes(computeList)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenComputeList(computeList))
d.Set("items", flattenComputeList(result))
return nil
}
@@ -325,7 +332,7 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
}
}
func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"includedeleted": {
Type: schema.TypeBool,
@@ -339,7 +346,12 @@ func dataSourceCompputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Optional: true,
},
"ignore_k8s": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If set to true, ignores any VMs associated with any k8s cluster",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -362,6 +374,6 @@ func DataSourceComputeList() *schema.Resource {
Default: &constants.Timeout60s,
},
Schema: dataSourceCompputeListSchemaMake(),
Schema: dataSourceComputeListSchemaMake(),
}
}

View File

@@ -296,7 +296,6 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
log.Debugf("flattenCompute: ID %d, RG ID %d", computeRec.ID, computeRec.RGID)
devices, _ := json.Marshal(computeRec.Devices)
userdata, _ := json.Marshal(computeRec.Userdata)
bootDisk := findBootDisk(computeRec.Disks)
//check extraDisks, ipa_type, is,
@@ -307,16 +306,13 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("arch", computeRec.Architecture)
d.Set("boot_order", computeRec.BootOrder)
d.Set("boot_disk_size", computeRec.BootDiskSize)
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk", flattenBootDisk(bootDisk))
d.Set("boot_disk_id", bootDisk.ID)
d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
if string(userdata) != "{}" {
d.Set("cloud_init", string(userdata))
}
d.Set("computeci_id", computeRec.ComputeCIID)
d.Set("created_by", computeRec.CreatedBy)
d.Set("created_time", computeRec.CreatedTime)

View File

@@ -35,6 +35,7 @@ package kvmvm
import (
"context"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
@@ -142,8 +143,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if ok {
userdata := argVal.(string)
if userdata != "" && userdata != "applied" {
createReqPPC.Userdata = userdata
createReqX86.Userdata = userdata
createReqPPC.Userdata = strings.TrimSpace(userdata)
createReqX86.Userdata = strings.TrimSpace(userdata)
}
}
@@ -215,7 +216,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
argVal, ok = d.GetOk("network")
if ok && argVal.(*schema.Set).Len() > 0 {
log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
err = utilityComputeNetworksConfigure(ctx, d, m, false, true)
err = utilityComputeNetworksConfigure(ctx, d, m, false, true, computeId)
if err != nil {
log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", computeId, err)
cleanup = true
@@ -373,7 +374,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req := compute.PFWAddRequest{
ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
PublicPortEnd: uint64(pfwItem["public_port_end"].(int)),
PublicPortEnd: int64(pfwItem["public_port_end"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
}
@@ -592,7 +593,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
}
log.Debugf("resourceComputeUpdate: enable=%t Compute ID %s after completing its resource configuration", d.Id(), enabled)
log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled)
}
// check compute statuses
@@ -630,6 +631,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
Force: true,
}
warnings := dc.Warnings{}
oldCpu, newCpu := d.GetChange("cpu")
if oldCpu.(int) != newCpu.(int) {
resizeReq.CPU = uint64(newCpu.(int))
@@ -691,16 +694,21 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
err = utilityComputeNetworksConfigure(ctx, d, m, true, false) // pass do_delta = true to apply changes, if any
if err != nil {
return diag.FromErr(err)
if d.HasChange("network") {
err = utilityComputeNetworksConfigure(ctx, d, m, true, false, computeRec.ID) // pass do_delta = true to apply changes, if any
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("description") || d.HasChange("name") {
req := compute.UpdateRequest{
ComputeID: computeRec.ID,
Name: d.Get("name").(string),
Description: d.Get("desc").(string),
ComputeID: computeRec.ID,
Name: d.Get("name").(string),
}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}
if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
@@ -1059,7 +1067,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
_, err := c.CloudAPI().Compute().PFWDel(ctx, req)
if err != nil {
return diag.FromErr(err)
warnings.Add(err)
}
}
}
@@ -1071,7 +1079,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
req := compute.PFWAddRequest{
ComputeID: computeRec.ID,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
PublicPortEnd: uint64(pfwItem["public_port_end"].(int)),
PublicPortEnd: int64(pfwItem["public_port_end"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
}
@@ -1311,7 +1319,8 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
// we may reuse dataSourceComputeRead here as we maintain similarity
// between Compute resource and Compute data source schemas
return resourceComputeRead(ctx, d, m)
defer resourceComputeRead(ctx, d, m)
return warnings.Get()
}
func isChangeDisk(els []interface{}, el interface{}) bool {
@@ -1526,7 +1535,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
},
@@ -1696,6 +1704,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
"network": {
Type: schema.TypeSet,
Computed: true,
Optional: true,
MinItems: 1,
MaxItems: constants.MaxNetworksPerCompute,
@@ -1826,7 +1835,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
"started": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Default: true,
Description: "Is compute started.",
},
"detach_disks": {

View File

@@ -34,6 +34,7 @@ package kvmvm
import (
"context"
"regexp"
"strconv"
log "github.com/sirupsen/logrus"
@@ -43,6 +44,16 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func matchComputes(computeList compute.ListComputes) compute.ListComputes {
matched, _ := regexp.Compile("[a-zA-Z]+\\d+-[a-zA-Z]+\\d+-[a-zA-Z]+\\d+")
result := computeList.FilterFunc(func(ic compute.ItemCompute) bool {
res := matched.Match([]byte(ic.Name))
return !res
})
return result
}
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg)
@@ -144,11 +155,21 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
return nil
}
func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool, skip_zero bool) error {
func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool, skip_zero bool, computeID uint64) error {
c := m.(*controller.ControllerCfg)
old_set, new_set := d.GetChange("network")
req := compute.StopRequest{
ComputeID: computeID,
Force: true,
}
_, err := c.CloudAPI().Compute().Stop(ctx, req)
if err != nil {
return err
}
apiErrCount := 0
var lastSavedError error
@@ -232,6 +253,14 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
startReq := compute.StartRequest{ComputeID: computeID}
_, err = c.CloudAPI().Compute().Start(ctx, startReq)
if err != nil {
apiErrCount++
lastSavedError = err
}
if apiErrCount > 0 {
log.Errorf("utilityComputeNetworksConfigure: there were %d error(s) when managing networks of Compute ID %s. Last error was: %s",
apiErrCount, d.Id(), lastSavedError)

View File

@@ -139,8 +139,8 @@ func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData,
c := m.(*controller.ControllerCfg)
req := lb.BackendServerDeleteRequest{
LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("name").(string),
ServerName: d.Get("backend_name").(string),
BackendName: d.Get("backend_name").(string),
ServerName: d.Get("name").(string),
}
_, err = c.CloudAPI().LB().BackendServerDelete(ctx, req)

View File

@@ -57,7 +57,7 @@ func resourcePfwCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
if portEnd, ok := d.GetOk("public_port_end"); ok {
req.PublicPortEnd = uint64(portEnd.(int))
req.PublicPortEnd = int64(portEnd.(int))
}
pfwId, err := c.CloudAPI().Compute().PFWAdd(ctx, req)

View File

@@ -578,7 +578,7 @@ func resourceVinsUpdate(ctx context.Context, d *schema.ResourceData, m interface
natRule := natRuleInterface.(map[string]interface{})
req := vins.NATRuleDelRequest{
VINSID: vinsData.ID,
RuleID: uint64(natRule["rule_id"].(int)),
RuleID: int64(natRule["rule_id"].(int)),
}
_, err := c.CloudAPI().VINS().NATRuleDel(ctx, req)
@@ -847,11 +847,11 @@ func ResourceVins() *schema.Resource {
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
Create: &constants.Timeout20m,
Read: &constants.Timeout600s,
Update: &constants.Timeout20m,
Delete: &constants.Timeout600s,
Default: &constants.Timeout600s,
},
Schema: resourceVinsSchemaMake(),

View File

@@ -55,7 +55,7 @@ func resourcePfwCreate(ctx context.Context, d *schema.ResourceData, m interface{
req.Proto = d.Get("proto").(string)
if portEnd, ok := d.GetOk("public_port_end"); ok {
req.PublicPortEnd = uint64(portEnd.(int))
req.PublicPortEnd = int64(portEnd.(int))
}
pfwId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -14,8 +14,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -0,0 +1,32 @@
#Uncomment this code
#and adjust the version and path as needed
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
source = "basis/decort/decort"
version = "<VERSION>"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
oauth2_url = "https://sso.digitalenergy.online"
controller_url = "https://mr4.digitalenergy.online"
app_id = ""
app_secret = ""
}
data "decort_flipgroup" "fg" {
# Flipgroup ID
# Required parameter
# int
flipgroup_id = 999
}
output "fg_out" {
value = data.decort_flipgroup.fg
}

View File

@@ -0,0 +1,37 @@
#Uncomment this code
#and adjust the version and path as needed
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
source = "basis/decort/decort"
version = "<VERSION>"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
oauth2_url = "https://sso.digitalenergy.online"
controller_url = "https://mr4.digitalenergy.online"
app_id = ""
app_secret = ""
}
data "decort_flipgroup_list" "fg" {
# Page number
# Optional parameter
# int
page = 0
# Page size
# Optional parameter
# int
size = 0
}
output "fg_out" {
value = data.decort_flipgroup_list.fg
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -0,0 +1,31 @@
#Uncomment this code
#and adjust the version and path as needed
#to work with a provider installed manually (not via the hashicorp provider registry)
/*
terraform {
required_providers {
decort = {
source = "basis/decort/decort"
version = "<VERSION>"
}
}
}
*/
provider "decort" {
authenticator = "oauth2"
oauth2_url = "https://sso.digitalenergy.online"
controller_url = "https://mr4.digitalenergy.online"
app_id = ""
app_secret = ""
}
data "decort_k8s_computes" "computes" {
# Cluster ID
# Required parameter
k8s_id = 999
}
output "computes_out" {
value = data.decort_k8s_computes.computes
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}
@@ -36,13 +36,18 @@ data "decort_kvmvm_list" "compute_list" {
#optional parameter
#type - integer
#if not set, all available data is returned
page = 1
page = 1
#page size
#optional parameter
#type - integer
#if not set, all available data is returned
size = 1
# Exclude VMs that belong to k8s clusters from the output (when set to true)
# optional parameter
# bool (default = false)
ignore_k8s = true
}
output "output" {

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -12,8 +12,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -10,8 +10,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -9,8 +9,8 @@
/* terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
} */

View File

@@ -11,8 +11,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

View File

@@ -9,8 +9,8 @@
terraform {
required_providers {
decort = {
version = "1.1"
source = "digitalenergy.online/decort/decort"
version = "<VERSION>"
source = "basis/decort/decort"
}
}
}

Some files were not shown because too many files have changed in this diff.