2024-11-12 13:41:38 +03:00
parent 040af43607
commit 36879efd58
517 changed files with 37877 additions and 1900 deletions

View File

@@ -126,6 +126,11 @@ func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by flipgroup ID",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,

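The sort_by arguments introduced in this commit all use the "+|-(field)" convention described above. A minimal standalone sketch (illustration only, not provider code; the helper name parseSortBy is made up) of how such a value splits into a field name and a direction:

package main

import (
	"fmt"
	"strings"
)

// parseSortBy splits a "+field" / "-field" value into the field name and the
// requested direction; a missing sign is treated as ascending.
func parseSortBy(s string) (field string, ascending bool) {
	switch {
	case strings.HasPrefix(s, "+"):
		return strings.TrimPrefix(s, "+"), true
	case strings.HasPrefix(s, "-"):
		return strings.TrimPrefix(s, "-"), false
	default:
		return s, true
	}
}

func main() {
	for _, v := range []string{"+name", "-created_time", "status"} {
		f, asc := parseSortBy(v)
		fmt.Printf("%-14s -> field=%q ascending=%v\n", v, f, asc)
	}
}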
View File

@@ -315,9 +315,8 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
userConv := user.(map[string]interface{})
req := account.DeleteUserRequest{
AccountID: accountId,
UserID: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
AccountID: accountId,
UserID: userConv["user_id"].(string),
}
_, err := c.CloudAPI().Account().DeleteUser(ctx, req)
if err != nil {
@@ -381,8 +380,7 @@ func isChangedUser(els []interface{}, el interface{}) bool {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string))) {
return true
}
}
@@ -424,11 +422,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
},
"recursive_delete": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},

View File

@@ -72,6 +72,10 @@ func utilityAccountFlipGroupsListCheckPresence(ctx context.Context, d *schema.Re
req.ByIP = by_ip.(string)
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if flipgroup_id, ok := d.GetOk("flipgroup_id"); ok {
req.FLIPGroupID = uint64(flipgroup_id.(int))
}

View File

@@ -0,0 +1,128 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAuditRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
auditRec, err := utilityAuditCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
flattenAudit(d, auditRec)
d.SetId(d.Get("audit_guid").(string))
return nil
}
func DataSourceAudit() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAuditRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAuditSchemaMake(),
}
}
func dataSourceAuditSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"audit_guid": {
Type: schema.TypeString,
Required: true,
Description: "audit guid",
},
"args": {
Type: schema.TypeString,
Computed: true,
},
"call": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"kwargs": {
Type: schema.TypeString,
Computed: true,
},
"remote_addr": {
Type: schema.TypeString,
Computed: true,
},
"responsetime": {
Type: schema.TypeFloat,
Computed: true,
},
"result": {
Type: schema.TypeString,
Computed: true,
},
"status_code": {
Type: schema.TypeInt,
Computed: true,
},
"tags": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
},
"timestamp_end": {
Type: schema.TypeFloat,
Computed: true,
},
"user": {
Type: schema.TypeString,
Computed: true,
},
}
}
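DataSourceAudit above returns a ready *schema.Resource, so exposing it only requires registering it in the provider's DataSourcesMap. A minimal sketch, assuming a hypothetical resource-type name "decort_audit" and an illustrative import path for the audit package:

package provider

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	// illustrative import path; the audit package's real location in this repository may differ
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/audit"
)

// Provider sketches how the new data source could be wired in; only the
// DataSourcesMap entry is relevant here.
func Provider() *schema.Provider {
	return &schema.Provider{
		DataSourcesMap: map[string]*schema.Resource{
			"decort_audit": audit.DataSourceAudit(), // hypothetical name
		},
	}
}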

View File

@@ -0,0 +1,56 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
)
func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
log.Debugf("flattenAudit: decoded audit guid %s", d.Get("audit_guid").(string))
d.Set("args", au.Arguments)
d.Set("call", au.Call)
d.Set("guid", au.GUID)
d.Set("kwargs", au.Kwargs)
d.Set("remote_addr", au.RemoteAddr)
d.Set("responsetime", au.ResponseTime)
d.Set("result", au.Result)
d.Set("status_code", au.StatusCode)
d.Set("tags", au.Tags)
d.Set("timestamp", au.Timestamp)
d.Set("timestamp_end", au.TimestampEnd)
d.Set("user", au.User)
}

View File

@@ -0,0 +1,62 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityAuditCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*audit.RecordAudit, error) {
c := m.(*controller.ControllerCfg)
req := audit.GetRequest{}
if d.Id() != "" {
req.AuditGuid = d.Id()
} else {
req.AuditGuid = d.Get("audit_guid").(string)
}
log.Debugf("utilityAuditCheckPresence: load audit")
auditInfo, err := c.CloudAPI().Audit().Get(ctx, req)
if err != nil {
return nil, err
}
return auditInfo, nil
}
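The utility above prefers the Terraform state ID and only falls back to the user-supplied audit_guid attribute when the ID is empty. A self-contained sketch of that precedence (the helper name resolveGUID is made up):

package main

import "fmt"

// resolveGUID mirrors the lookup order used above: a non-empty state ID wins,
// otherwise the configured attribute value is used.
func resolveGUID(stateID, attrGUID string) string {
	if stateID != "" {
		return stateID
	}
	return attrGUID
}

func main() {
	fmt.Println(resolveGUID("", "a1b2c3"))        // first read: attribute is used
	fmt.Println(resolveGUID("a1b2c3", "ignored")) // refresh: state ID wins
}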

View File

@@ -394,7 +394,7 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
"driver": {
Type: schema.TypeString,
Required: true,
Description: "compute driver like a KVM_X86, KVM_PPC, etc.",
Description: "compute driver like a KVM_X86, etc.",
},
///4.4.0
"sep_id": {

View File

@@ -238,12 +238,6 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)
@@ -362,7 +356,6 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
c := m.(*controller.ControllerCfg)
@@ -437,12 +430,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "Reason for deletion",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
@@ -677,27 +664,27 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},

View File

@@ -177,7 +177,6 @@ func resourceDiskReplicationDelete(ctx context.Context, d *schema.ResourceData,
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d", diskId)
@@ -243,11 +242,6 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Delete disk permanently",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for disk deletion",
},
"replica_disk_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -0,0 +1,153 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdk, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
flattenDPDKNet(d, dpdk)
return nil
}
func dataSourceDPDKNetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
}
return res
}
func DataSourceDPDKNet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetSchemaMake(),
}
}

View File

@@ -0,0 +1,212 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdkList, err := utilityDPDKNetListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDPDKNetList(dpdkList))
d.Set("entry_count", dpdkList.EntryCount)
return nil
}
func dataSourceDPDKNetListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"gid": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by GID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "Find by description",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"compute_ids": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Find by compute IDs",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDPDKNetList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetListSchemaMake(),
}
}

View File

@@ -0,0 +1,44 @@
package dpdknet
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
)
func flattenDPDKNet(d *schema.ResourceData, dpdk *dpdk.RecordDPDKNet) {
d.Set("dpdk_id", dpdk.ID)
d.Set("account_access", dpdk.AccountAccess)
d.Set("created_time", dpdk.CreatedTime)
d.Set("desc", dpdk.Description)
d.Set("gid", dpdk.GID)
d.Set("guid", dpdk.GUID)
d.Set("name", dpdk.Name)
d.Set("rg_access", dpdk.RGAccess)
d.Set("status", dpdk.Status)
d.Set("ovs_bridge", dpdk.OVSBridge)
d.Set("vlan_id", dpdk.VlanID)
d.Set("compute_ids", dpdk.ComputeIDs)
d.Set("updated_time", dpdk.UpdatedTime)
}
func flattenDPDKNetList(list *dpdk.ListDPDKNet) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list.Data))
for _, dpdk := range list.Data {
temp := map[string]interface{}{
"dpdk_id": dpdk.ID,
"account_access": dpdk.AccountAccess,
"desc": dpdk.Description,
"gid": dpdk.GID,
"guid": dpdk.GUID,
"name": dpdk.Name,
"rg_access": dpdk.RGAccess,
"status": dpdk.Status,
"ovs_bridge": dpdk.OVSBridge,
"vlan_id": dpdk.VlanID,
"compute_ids": dpdk.ComputeIDs,
"updated_time": dpdk.UpdatedTime,
}
res = append(res, temp)
}
return res
}
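flattenDPDKNetList converts SDK records into []map[string]interface{} so the result can be stored with d.Set on a TypeList of nested resources. A self-contained sketch of the same pattern, with a locally defined record type standing in for the items of dpdk.ListDPDKNet.Data (illustration only):

package main

import "fmt"

// record stands in for an SDK list item; only a few fields are sketched.
type record struct {
	ID     uint64
	Name   string
	VlanID uint64
}

// flattenRecords builds one map per record, with keys matching the schema
// field names, the same shape flattenDPDKNetList produces above.
func flattenRecords(list []record) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(list))
	for _, r := range list {
		res = append(res, map[string]interface{}{
			"dpdk_id": r.ID,
			"name":    r.Name,
			"vlan_id": r.VlanID,
		})
	}
	return res
}

func main() {
	fmt.Println(flattenRecords([]record{{ID: 1, Name: "dpdk-net-1", VlanID: 100}}))
}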

View File

@@ -0,0 +1,68 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDPDKNetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.RecordDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.GetRequest{}
if d.Get("dpdk_id") != nil {
if d.Get("dpdk_id").(int) == 0 {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
} else {
req.DPDKID = uint64(d.Get("dpdk_id").(int))
}
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
}
log.Debugf("utilityDPDKCheckPresence: get DPDK network")
dpdk, err := c.CloudAPI().DPDKNet().Get(ctx, req)
if err != nil {
return nil, err
}
return dpdk, nil
}

View File

@@ -0,0 +1,87 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityDPDKNetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.ListDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.ListRequest{}
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}
if GID, ok := d.GetOk("gid"); ok {
req.GID = uint64(GID.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if computeIDs, ok := d.GetOk("compute_ids"); ok {
IDs := computeIDs.([]interface{})
for _, ID := range IDs {
req.ComputeIDs = append(req.ComputeIDs, uint64(ID.(int)))
}
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDPDKListCheckPresence: load DPDK network list")
dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, req)
if err != nil {
return nil, err
}
return dpdkList, nil
}

View File

@@ -124,7 +124,7 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
Type: schema.TypeString,
},
Description: "client_ids",
},

View File

@@ -189,7 +189,7 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
if cliensId, ok := d.GetOk("client_ids"); ok {
cliensIds := cliensId.([]interface{})
for _, elem := range cliensIds {
req.ClientIDs = append(req.ClientIDs, uint64(elem.(int)))
req.ClientIDs = append(req.ClientIDs, (elem.(string)))
}
}
if status, ok := d.GetOk("status"); ok {

View File

@@ -132,6 +132,11 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "find by enabled True or False",
},
"items": {
Type: schema.TypeList,
Computed: true,

View File

@@ -129,8 +129,8 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "binary architecture of this image, one of X86_64",
}
sch["drivers"] = &schema.Schema{
@@ -141,13 +141,6 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
},
}
sch["permanently"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
}
sch["network_interface_naming"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,

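The narrowed architecture validators above call validation.StringInSlice with ignoreCase set to true. A small standalone check (sketch, not provider code) showing what that flag means in practice:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Same validator shape as the "architecture" field; the second argument
	// makes the comparison case-insensitive.
	validate := validation.StringInSlice([]string{"X86_64"}, true)

	for _, v := range []string{"X86_64", "x86_64", "PPC64_LE"} {
		warns, errs := validate(v, "architecture")
		fmt.Printf("%-9s warnings=%d errors=%d\n", v, len(warns), len(errs))
	}
}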
View File

@@ -1,67 +1,60 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func resourceImageVirtualSchemaMake(sch map[string]*schema.Schema) map[string]*schema.Schema {
delete(sch, "show_all")
sch["name"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Name of the rescue disk",
}
sch["link_to"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "ID of real image to link this virtual image to upon creation",
}
sch["permanently"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
}
sch["image_id"] = &schema.Schema{
Type: schema.TypeInt,
Computed: true,
Description: "Image id",
}
return sch
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func resourceImageVirtualSchemaMake(sch map[string]*schema.Schema) map[string]*schema.Schema {
delete(sch, "show_all")
sch["name"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Name of the rescue disk",
}
sch["link_to"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Description: "ID of real image to link this virtual image to upon creation",
}
sch["image_id"] = &schema.Schema{
Type: schema.TypeInt,
Computed: true,
Description: "Image id",
}
return sch
}

View File

@@ -157,10 +157,6 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
ImageID: uint64(d.Get("image_id").(int)),
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -194,10 +194,6 @@ func resourceImageFromBlankComputeDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
@@ -352,12 +348,6 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"image_id": {
Type: schema.TypeInt,

View File

@@ -201,10 +201,6 @@ func resourceImageFromPlatformDiskDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
@@ -302,8 +298,8 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "binary architecture of this image, one of X86_64",
},
"username": {
@@ -338,11 +334,10 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Required: true,
Elem: &schema.Schema{
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
@@ -365,12 +360,6 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"image_id": {
Type: schema.TypeInt,

View File

@@ -46,12 +46,12 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
c := m.(*controller.ControllerCfg)
req := image.ListRequest{}
if sep_id, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sep_id.(int))
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
if by_id, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(by_id.(int))
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}
if name, ok := d.GetOk("name"); ok {
@@ -66,31 +66,31 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
req.Architecture = architecture.(string)
}
if type_image, ok := d.GetOk("type_image"); ok {
req.TypeImage = type_image.(string)
if typeImage, ok := d.GetOk("type_image"); ok {
req.TypeImage = typeImage.(string)
}
if image_size, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(image_size.(int))
if imageSize, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(imageSize.(int))
}
if sep_name, ok := d.GetOk("sep_name"); ok {
req.SEPName = sep_name.(string)
if sepName, ok := d.GetOk("sep_name"); ok {
req.SEPName = sepName.(string)
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if public, ok := d.GetOk("public"); ok {
if public, ok := d.GetOkExists("public"); ok {
req.Public = public.(bool)
}
if hot_resize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hot_resize.(bool)
if hotResize, ok := d.GetOkExists("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}
if bootable, ok := d.GetOk("bootable"); ok {
if bootable, ok := d.GetOkExists("bootable"); ok {
req.Bootable = bootable.(bool)
}
@@ -104,6 +104,9 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if enabled, ok := d.GetOkExists("enabled"); ok {
req.Enabled = enabled.(bool)
}
log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudAPI().Image().List(ctx, req)

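The boolean filters in this file (public, hot_resize, bootable and the new enabled) switched from GetOk to GetOkExists: GetOk reports ok=false whenever the value equals the type's zero value, so an explicit false filter would be silently dropped, while GetOkExists distinguishes "set to false" from "not set". A self-contained sketch of the difference in semantics (simplified; the real SDK methods operate on schema.ResourceData):

package main

import "fmt"

// attr mimics a configured attribute: its value plus whether the user set it.
type attr struct {
	value bool
	set   bool
}

// getOk mirrors GetOk semantics for a bool: ok is false when the value is the
// zero value, even if the user explicitly wrote false in the configuration.
func getOk(a attr) (value, ok bool) {
	return a.value, a.set && a.value
}

// getOkExists mirrors GetOkExists semantics: ok reflects whether the attribute
// was set at all, regardless of its value.
func getOkExists(a attr) (value, ok bool) {
	return a.value, a.set
}

func main() {
	explicitFalse := attr{value: false, set: true}

	_, ok := getOk(explicitFalse)
	fmt.Println("GetOk sees enabled = false as set:", ok) // false: the filter would be dropped

	_, ok = getOkExists(explicitFalse)
	fmt.Println("GetOkExists sees enabled = false as set:", ok) // true: the filter is applied
}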
View File

@@ -103,16 +103,17 @@ func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{
}
d.Set("kubeconfig", kubeconfig)
if cluster.LBID != 0 {
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudAPI().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudAPI().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
flattenK8sData(d, *cluster, masterComputeList, workersComputeList)
return nil
}
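The +/- markers of this hunk were lost, which makes the old and new versions of the load-balancer block run together. Read with the hunk header, the change appears to keep the LB lookup and the dependent d.Set calls for extnet_id and lb_ip inside the cluster.LBID guard, so they only run when an LB exists and was fetched successfully. A self-contained sketch of that guarded pattern with stand-in types (a hedged reconstruction of the intent, not the verbatim code):

package main

import (
	"errors"
	"fmt"
)

// lbRecord stands in for the SDK's LB record; only the fields used above appear.
type lbRecord struct {
	ExtNetID   int
	FrontendIP string
}

// getLB stands in for c.CloudAPI().LB().Get.
func getLB(id int) (*lbRecord, error) {
	if id == 0 {
		return nil, errors.New("no such LB")
	}
	return &lbRecord{ExtNetID: 10, FrontendIP: "10.0.0.5"}, nil
}

func main() {
	state := map[string]interface{}{}
	lbID := 7

	// The LB is only fetched, and the dependent fields only stored, when the
	// cluster actually references a load balancer.
	if lbID != 0 {
		lb, err := getLB(lbID)
		if err != nil {
			fmt.Println("read failed:", err)
			return
		}
		state["extnet_id"] = lb.ExtNetID
		state["lb_ip"] = lb.FrontendIP
	}
	fmt.Println(state)
}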

View File

@@ -254,6 +254,20 @@ func flattenK8sData(d *schema.ResourceData, cluster k8s.RecordK8S, masters []com
d.Set("tech_status", cluster.TechStatus)
d.Set("updated_by", cluster.UpdatedBy)
d.Set("updated_time", cluster.UpdatedTime)
d.Set("highly_available_lb", cluster.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(cluster.AddressVIP))
d.Set("extnet_only", cluster.ExtnetOnly)
d.Set("with_lb", cluster.WithLB)
}
func flattenAddressVIP(addressVIP k8s.K8SAddressVIP) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"backend_ip": addressVIP.BackendIP,
"frontend_ip": addressVIP.FrontendIP,
}
res = append(res, temp)
return res
}
func flattenServiceAccount(serviceAccount k8s.RecordServiceAccount) []map[string]interface{} {

View File

@@ -153,6 +153,11 @@ func workersSchemaMake() map[string]*schema.Schema {
Schema: detailedInfoSchemaMake(),
},
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"guid": {
Type: schema.TypeString,
Computed: true,

View File

@@ -202,8 +202,13 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
if oidcCertificate, ok := d.GetOk("oidc_cert"); ok {
createReq.OidcCertificate = oidcCertificate.(string)
}
///
if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}
createReq.ExtNetOnly = d.Get("extnet_only").(bool)
if extNet, ok := d.GetOk("extnet_id"); ok {
@@ -745,6 +750,12 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"desc": {
Type: schema.TypeString,
Optional: true,

View File

@@ -124,6 +124,10 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
createReq.MasterSEPID = uint64(sepId.(int))
}
if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}
if sepPool, ok := d.GetOk("sep_pool"); ok {
createReq.MasterSEPPool = sepPool.(string)
}
@@ -532,10 +536,10 @@ func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interfac
oldVal, newVal := d.GetChange("num")
if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
ids := make([]uint64, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
ids = append(ids, id)
}
req := k8s.DeleteMasterFromGroupRequest{
@@ -656,6 +660,11 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,

View File

@@ -70,6 +70,7 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
}
labels, _ := d.Get("labels").([]interface{})
@@ -168,6 +169,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
Chipset: d.Get("chipset").(string),
}
_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
@@ -253,6 +255,12 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Description: "Number of worker nodes to create.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,

View File

@@ -84,6 +84,7 @@ func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.Re
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
Num: uint64(newNum - oldNum),
Chipset: wgNew["chipset"].(string),
}
_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)
@@ -145,6 +146,7 @@ func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.Recor
WorkerCPU: uint64(found_wg["cpu"].(int)),
WorkerRAM: uint64(found_wg["ram"].(int)),
WorkerDisk: uint64(found_wg["disk"].(int)),
Chipset: found_wg["chipset"].(string),
}
labels, _ := found_wg["labels"].([]interface{})

View File

@@ -260,6 +260,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -475,6 +479,10 @@ func computeQOSSchemaMake() map[string]*schema.Schema {
func computeInterfacesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -511,6 +519,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -542,6 +554,13 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Schema: computeQOSSchemaMake(),
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computeLibvirtSettingsSchemaMake(),
},
},
"target": {
Type: schema.TypeString,
Computed: true,
@@ -559,6 +578,40 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
},
}
}
func computeLibvirtSettingsSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func computeOsUsersSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"guid": {
@@ -650,6 +703,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"boot_order": {
Type: schema.TypeList,
Computed: true,

View File

@@ -64,6 +64,10 @@ func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m in
func computeDisksSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -136,6 +140,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"clones": {
Type: schema.TypeList,
Computed: true,

View File

@@ -47,8 +47,9 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
"bus_number": disk.BusNumber,
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}
@@ -69,30 +70,49 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
res := make([]map[string]interface{}, 0, len(interfaces))
for _, interfaceItem := range interfaces {
temp := map[string]interface{}{
"conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress,
"listen_ssh": interfaceItem.ListenSSH,
"mac": interfaceItem.MAC,
"name": interfaceItem.Name,
"net_id": interfaceItem.NetID,
"netmask": interfaceItem.NetMask,
"net_type": interfaceItem.NetType,
"node_id": interfaceItem.NodeID,
"pci_slot": interfaceItem.PCISlot,
"qos": flattenQOS(interfaceItem.QOS),
"target": interfaceItem.Target,
"type": interfaceItem.Type,
"vnfs": interfaceItem.VNFs,
"bus_number": interfaceItem.BusNumber,
"conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress,
"listen_ssh": interfaceItem.ListenSSH,
"mac": interfaceItem.MAC,
"mtu": interfaceItem.MTU,
"name": interfaceItem.Name,
"net_id": interfaceItem.NetID,
"netmask": interfaceItem.NetMask,
"net_type": interfaceItem.NetType,
"node_id": interfaceItem.NodeID,
"pci_slot": interfaceItem.PCISlot,
"qos": flattenQOS(interfaceItem.QOS),
"target": interfaceItem.Target,
"type": interfaceItem.Type,
"vnfs": interfaceItem.VNFs,
"libvirt_settings": flattenLibvirtSettings(interfaceItem.LibvirtSettings),
}
res = append(res, temp)
}
return res
}
func flattenLibvirtSettings(libvirtSettings compute.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}
func flattenSnapSets(snapSets compute.ListSnapSets) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(snapSets))
for _, snapSet := range snapSets {
@@ -165,6 +185,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"arch": compute.Architecture,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"chipset": compute.Chipset,
"cd_image_id": compute.CdImageId,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,
@@ -348,6 +369,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool)
d.Set("chipset", computeRec.Chipset)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)
@@ -503,6 +525,7 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
"acl": string(acl),
"account_id": disk.AccountID,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"description": disk.Description,
@@ -590,6 +613,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("anti_affinity_rules", flattenListRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Architecture)
d.Set("chipset", computeRec.Chipset)
d.Set("boot_order", computeRec.BootOrder)
d.Set("bootdisk_size", computeRec.BootDiskSize)
d.Set("cd_image_id", computeRec.CdImageId)

View File

@@ -128,7 +128,7 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC", "DPDK"}, false), // observe case while validating
Description: "Type of the network for this connection, either EXTNET or VINS.",
},

View File

@@ -27,7 +27,7 @@ func resourceComputeResourceV1() *schema.Resource {
Required: true,
ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
@@ -569,7 +569,7 @@ func resourceComputeResourceV2() *schema.Resource {
Required: true,
ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {

View File

@@ -38,6 +38,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
@@ -147,6 +148,23 @@ func existVFPoolId(ctx context.Context, m interface{}, id int) (int, bool) {
return id, false
}
func existDPDKNetId(ctx context.Context, m interface{}, id int) (int, bool) {
c := m.(*controller.ControllerCfg)
req := dpdknet.ListRequest{ByID: uint64(id)}
dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, req)
if err != nil {
log.Debugf("Unable to retrieve DPDK network list, %s", err)
return id, false
}
if len(dpdkList.Data) == 1 {
return 0, true
}
return id, false
}
func isMoreThanOneDisksTypeB(ctx context.Context, disks interface{}) bool {
count := 0

View File

@@ -42,7 +42,6 @@ import (
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
@@ -60,7 +59,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
createReqX86 := kvmx86.CreateRequest{}
createReqPPC := kvmppc.CreateRequest{}
hasRG, err := existRgID(ctx, d, m)
if err != nil {
@@ -101,12 +99,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
return diag.Errorf("resourceComputeCreate: can't create compute because extnet ID %d is not allowed or does not exist", extNetId)
}
case "VFNIC":
if d.Get("driver").(string) == "KVM_PPC" {
return diag.Errorf("resourceComputeCreate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
}
if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeCreate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
}
case "DPDK":
if dpdkId, ok := existDPDKNetId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeCreate: can't create compute because DPDK ID %d is not allowed or does not exist", dpdkId)
}
default:
continue
}
@@ -115,32 +114,26 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
argVal, ok := d.GetOk("description")
if ok {
createReqPPC.Description = argVal.(string)
createReqX86.Description = argVal.(string)
}
if sepID, ok := d.GetOk("sep_id"); ok {
createReqPPC.SEPID = uint64(sepID.(int))
createReqX86.SepID = uint64(sepID.(int))
}
if pool, ok := d.GetOk("pool"); ok {
createReqPPC.Pool = pool.(string)
createReqX86.Pool = pool.(string)
}
if ipaType, ok := d.GetOk("ipa_type"); ok {
createReqPPC.IPAType = ipaType.(string)
createReqX86.IPAType = ipaType.(string)
}
if bootSize, ok := d.GetOk("boot_disk_size"); ok {
createReqPPC.BootDisk = uint64(bootSize.(int))
createReqX86.BootDisk = uint64(bootSize.(int))
}
if IS, ok := d.GetOk("is"); ok {
createReqPPC.IS = IS.(string)
createReqX86.IS = IS.(string)
}
@@ -181,45 +174,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
if networks.(*schema.Set).Len() > 0 {
ns := networks.(*schema.Set).List()
sort.Slice(ns, func(i, j int) bool {
weightI := ns[i].(map[string]interface{})["weight"].(int)
weightJ := ns[j].(map[string]interface{})["weight"].(int)
if weightI == 0 {
return false
}
if weightJ == 0 {
return true
}
return weightI < weightJ
})
interfaces := make([]kvmppc.Interface, 0)
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
reqInterface := kvmppc.Interface{
NetType: netInterfaceVal["net_type"].(string),
NetID: uint64(netInterfaceVal["net_id"].(int)),
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
}
interfaces = append(interfaces, reqInterface)
}
createReqPPC.Interfaces = interfaces
}
}
if disks, ok := d.GetOk("disks"); ok {
disksX86 := make([]kvmx86.DataDisk, 0)
disksPPC := make([]kvmppc.DataDisk, 0)
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
@@ -244,96 +200,58 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.DataDisks = disksX86
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
reqDataDisk := kvmppc.DataDisk{
DiskName: diskVal["disk_name"].(string),
Size: uint64(diskVal["size"].(int)),
}
if sepId, ok := diskVal["sep_id"]; ok {
reqDataDisk.SepID = uint64(sepId.(int))
}
if pool, ok := diskVal["pool"]; ok {
reqDataDisk.Pool = pool.(string)
}
if desc, ok := diskVal["desc"]; ok {
reqDataDisk.Description = desc.(string)
}
if imageID, ok := diskVal["image_id"]; ok {
reqDataDisk.ImageID = uint64(imageID.(int))
}
disksPPC = append(disksPPC, reqDataDisk)
}
createReqPPC.DataDisks = disksPPC
}
argVal, ok = d.GetOk("cloud_init")
if ok {
userdata := argVal.(string)
if userdata != "" && userdata != "applied" {
createReqPPC.Userdata = strings.TrimSpace(userdata)
createReqX86.Userdata = strings.TrimSpace(userdata)
}
}
var computeId uint64
driver := d.Get("driver").(string)
if driver == "KVM_PPC" {
createReqPPC.RGID = uint64(d.Get("rg_id").(int))
createReqPPC.Name = d.Get("name").(string)
createReqPPC.CPU = uint64(d.Get("cpu").(int))
createReqPPC.RAM = uint64(d.Get("ram").(int))
createReqPPC.ImageID = uint64(d.Get("image_id").(int))
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
apiResp, err := c.CloudAPI().KVMPPC().Create(ctx, createReqPPC)
if err != nil {
return diag.FromErr(err)
}
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
} else {
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
createReqX86.Driver = driver
createReqX86.Driver = driver
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomFields = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomFields = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
createReqX86.Chipset = d.Get("chipset").(string)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
warnings := dc.Warnings{}
@@ -730,12 +648,13 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.Errorf("resourceComputeUpdate: can't update compute because extnet ID %d is not allowed or does not exist", extNetId)
}
case "VFNIC":
if d.Get("driver").(string) == "KVM_PPC" {
return diag.Errorf("resourceComputeUpdate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
}
if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeUpdate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
}
case "DPDK":
if dpdkId, ok := existDPDKNetId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeCreate: can't create compute because DPDK ID %d is not allowed or does not exist", dpdkId)
}
default:
continue
}
@@ -960,6 +879,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
req.HPBacked = d.Get("hp_backed").(bool)
}
if d.HasChange("chipset") {
req.Chipset = d.Get("chipset").(string)
}
// Nota bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
// If STARTED, we need to stop it before update
var isStopRequired bool
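For illustration, a minimal sketch of the stop-before-update flow described in the note above (utilityComputeStart and the computeRec fields are assumptions made for this sketch, not taken from this commit; only utilityComputeStop appears in the diff):

	// Sketch only: pinning-related fields cannot change while the compute is STARTED,
	// so stop it first, apply the update, then start it again if it was running.
	isStopRequired := false
	if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed") && computeRec.TechStatus == "STARTED" {
		isStopRequired = true
	}
	if isStopRequired {
		if err := utilityComputeStop(ctx, computeRec.ID, m); err != nil {
			return diag.FromErr(err)
		}
	}
	// ... send the resize/pinning update request here ...
	if isStopRequired {
		// hypothetical helper, mirroring utilityComputeStop
		if err := utilityComputeStart(ctx, computeRec.ID, m); err != nil {
			return diag.FromErr(err)
		}
	}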
@@ -1449,10 +1372,19 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
Label: snapshotItem["label"].(string),
}
asyncMode, ok := d.GetOk("snapshot_delete_async")
if ok && asyncMode.(bool) {
_, err := c.CloudAPI().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
@@ -1924,7 +1856,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
// ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
@@ -1948,6 +1880,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
//ForceNew: true, //REDEPLOY
Description: "ID of the OS image to base this compute instance on.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Type of the emulated system.",
},
"without_boot_disk": {
Type: schema.TypeBool,
Optional: true,
@@ -2124,6 +2062,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Schema: snapshotSubresourceSchemaMake(),
},
},
"snapshot_delete_async": {
Type: schema.TypeBool,
Optional: true,
},
"rollback": {
Type: schema.TypeSet,
MaxItems: 1,

View File

@@ -177,13 +177,16 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
oldSet, newSet := d.GetChange("network")
oldList := oldSet.(*schema.Set).List()
newList := newSet.(*schema.Set).List()
detachMap, changeIpMap, attachMap := differenceNetwork(oldList, newList)
apiErrCount := 0
var lastSavedError error
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(detachMap), d.Id())
for _, netData := range detachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetDetachRequest{
ComputeID: computeId,
@@ -200,22 +203,41 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(changeIpMap), d.Id())
for _, netData := range changeIpMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.ChangeIPRequest{
ComputeID: computeId,
NetType: netData["net_type"].(string),
NetID: uint64(netData["net_id"].(int)),
IPAddr: netData["ip_address"].(string),
}
_, err := c.CloudAPI().Compute().ChangeIP(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to change net ID %d of type %s from Compute ID %s: %s",
netData["net_id"].(int), netData["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || oldSet.(*schema.Set).Len() < 1 {
if oldSet.(*schema.Set).Len() == len(detachMap) || oldSet.(*schema.Set).Len() == 0 {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if err := utilityComputeStop(ctx, computeId, m); err != nil {
apiErrCount++
lastSavedError = err
}
if start := d.Get("started"); start.(bool) {
needStart = true
}
}
sort.Slice(attachMap, func(i, j int) bool {
weightI := attachMap[i]["weight"].(int)
weightJ := attachMap[j]["weight"].(int)
if weightI == 0 {
return false
}
@@ -224,9 +246,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
return weightI < weightJ
})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attachSet.Len(), d.Id())
for _, runner := range attachList {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(attachMap), d.Id())
for _, netData := range attachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetAttachRequest{
ComputeID: computeId,
@@ -343,3 +364,50 @@ func utilityComputeUpdatePciDevices(ctx context.Context, d *schema.ResourceData,
return nil
}
func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap, attachMap []map[string]interface{}) {
attachMap = make([]map[string]interface{}, 0)
changeIpMap = make([]map[string]interface{}, 0)
detachMap = make([]map[string]interface{}, 0)
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
found := false
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if (newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"] {
changeIpMap = append(changeIpMap, newMap)
found = true
break
} else if newMap["ip_address"] == oldMap["ip_address"] {
found = true
break
}
}
}
if found {
continue
}
detachMap = append(detachMap, oldMap)
}
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
found := false
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if newMap["ip_address"] == oldMap["ip_address"] || ((newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"]) {
found = true
break
}
}
}
if found {
continue
}
attachMap = append(attachMap, newMap)
}
return
}
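A minimal usage sketch of differenceNetwork (illustrative only; the real call site is utilityComputeNetworksConfigure above, and the concrete values below are made up):

	// Each network block is compared by (net_type, net_id, weight):
	// - present before but not after              -> detachMap
	// - same key but new ip_address (EXTNET/VINS) -> changeIpMap
	// - present after but not before              -> attachMap
	oldList := []interface{}{
		map[string]interface{}{"net_type": "VINS", "net_id": 101, "weight": 10, "ip_address": "10.0.0.5"},
		map[string]interface{}{"net_type": "EXTNET", "net_id": 7, "weight": 20, "ip_address": "185.1.1.10"},
	}
	newList := []interface{}{
		map[string]interface{}{"net_type": "VINS", "net_id": 101, "weight": 10, "ip_address": "10.0.0.6"}, // IP changed
		map[string]interface{}{"net_type": "VFNIC", "net_id": 3, "weight": 30, "ip_address": ""},          // newly attached
	}
	detachMap, changeIpMap, attachMap := differenceNetwork(oldList, newList)
	// detachMap   -> the EXTNET block (no longer present)
	// changeIpMap -> the VINS block with the new address
	// attachMap   -> the VFNIC block
	_, _, _ = detachMap, changeIpMap, attachMap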

View File

@@ -280,10 +280,7 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -59,10 +59,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"cpu": {
Type: schema.TypeInt,

View File

@@ -89,7 +89,6 @@ func flattenResgroup(d *schema.ResourceData, details rg.RecordResourceGroup) err
d.Set("account_id", details.AccountID)
d.Set("gid", details.GID)
d.Set("def_net_type", details.DefNetType)
d.Set("name", details.Name)
d.Set("quota", flattenQuota(details.ResourceLimits))
d.Set("account_name", details.AccountName)

View File

@@ -45,7 +45,6 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
@@ -139,8 +138,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
}
if defNetType, ok := d.GetOk("def_net_type"); ok {
req.DefNet = defNetType.(string) // NOTE: in API default network type is set by "def_net" parameter
} else {
d.Set("def_net_type", "PRIVATE")
}
if owner, ok := d.GetOk("owner"); ok {
@@ -155,10 +152,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.Description = description.(string)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if extNetId, ok := d.GetOk("ext_net_id"); ok {
req.ExtNetID = uint64(extNetId.(int))
}
@@ -195,10 +188,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().AccessGrant(ctx, req)
if err != nil {
w.Add(err)
@@ -222,16 +211,12 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
if reason, ok := defNetItem["reason"]; ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().SetDefNet(ctx, req)
if err != nil {
w.Add(err)
}
d.Set("def_net_type", netType)
}
}
@@ -242,10 +227,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
RGID: apiResp,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().Enable(ctx, req)
if err != nil {
w.Add(err)
@@ -255,10 +236,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
RGID: apiResp,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().Disable(ctx, req)
if err != nil {
w.Add(err)
@@ -287,7 +264,6 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
case status.Modeled:
return diag.Errorf("The resource group is in status: %s, please, contact support for more information", rgData.Status)
case status.Created:
case status.Enabled:
case status.Deleted:
// restoreReq := rg.RestoreRequest{RGID: rgData.ID}
// enableReq := rg.EnableRequest{RGID: rgData.ID}
@@ -374,10 +350,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
restoreReq := rg.RestoreRequest{RGID: rgData.ID}
if reason, ok := d.GetOk("reason"); ok {
restoreReq.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
@@ -386,9 +358,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if enable, ok := d.GetOk("enable"); ok && enable.(bool) {
enableReq := rg.EnableRequest{RGID: rgData.ID}
if reason, ok := d.GetOk("reason"); ok {
enableReq.Reason = reason.(string)
}
_, err = c.CloudAPI().RG().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
@@ -423,26 +392,19 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
The following code fragment checks if any of these have been changed and generates error.
*/
if d.HasChange("def_net") {
_, newDefNet := d.GetChange("def_net")
if newDefNet.(*schema.Set).Len() == 0 {
return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty")
}
}
for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
attrNew, attrOld := d.GetChange(attr)
if attrNew.(string) != attrOld.(string) {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr))
}
}
attrNew, attrOld := d.GetChange("ext_net_id")
if attrNew.(int) != attrOld.(int) {
if d.HasChange("ext_net_id") {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id()))
}
if d.HasChanges("name", "quota", "description", "register_computes") {
if d.HasChanges("name", "quota", "description", "register_computes", "uniq_pools") {
if err := utilityUpdateRG(ctx, d, m, rgData.ID); err != nil {
return diag.FromErr(err)
}
@@ -477,9 +439,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
RGID: rgData.ID,
User: user,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().AccessRevoke(ctx, req)
if err != nil {
@@ -499,10 +458,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().AccessGrant(ctx, req)
if err != nil {
return diag.FromErr(err)
@@ -511,30 +466,31 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
}
if d.HasChange("def_net") {
oldDefNet, newDefNet := d.GetChange("def_net")
if oldDefNet.(*schema.Set).Len() > 0 {
_, err := c.CloudAPI().RG().RemoveDefNet(ctx, rg.RemoveDefNetRequest{RGID: rgData.ID})
if err != nil {
return diag.FromErr(err)
}
}
if newDefNet.(*schema.Set).Len() > 0 {
changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List()
for _, changedDefNetInterface := range changedDefNet {
defNetItem := changedDefNetInterface.(map[string]interface{})
netType := defNetItem["net_type"].(string)
req := rg.SetDefNetRequest{
RGID: rgData.ID,
NetType: netType,
}
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
_, err := c.CloudAPI().RG().SetDefNet(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
@@ -559,9 +515,6 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudAPI().RG().Delete(ctx, req)
if err != nil {
@@ -598,7 +551,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
@@ -700,11 +652,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -727,11 +674,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -754,11 +696,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Set to True if you want force delete non-empty RG",
},
"register_computes": {
Type: schema.TypeBool,
Optional: true,
@@ -876,6 +813,7 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
},
"uniq_pools": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
@@ -913,25 +851,18 @@ func ResourceResgroup() *schema.Resource {
Default: &constants.Timeout300s,
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChange("def_net") {
diff.SetNewComputed("def_net_id")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_by")
diff.SetNewComputed("updated_time")
}
return nil
},
Schema: ResourceRgSchemaMake(),
StateUpgraders: []schema.StateUpgrader{
{
Type: resourceRGResourceV1().CoreConfigSchema().ImpliedType(),

View File

@@ -52,9 +52,6 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
} else {
req.RGID = uint64(d.Get("rg_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
rgData, err := c.CloudAPI().RG().Get(ctx, req)
if err != nil {
@@ -137,8 +134,14 @@ func utilityUpdateRG(ctx context.Context, d *schema.ResourceData, m interface{},
req.RegisterComputes = d.Get("register_computes").(bool)
}
if d.HasChange("uniq_pools") {
uniqPools := d.Get("uniq_pools").([]interface{})
if len(uniqPools) == 0 {
req.ClearUniqPools = true
}
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}
_, err := c.CloudAPI().RG().Update(ctx, req)

View File

@@ -46,10 +46,6 @@ func utilityDataRgUsageCheckPresence(ctx context.Context, d *schema.ResourceData
RGID: uint64(d.Get("rg_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
usage, err := c.CloudAPI().RG().Usage(ctx, req)
if err != nil {
return nil, err

View File

@@ -86,9 +86,17 @@ func resourceSnapshotDelete(ctx context.Context, d *schema.ResourceData, m inter
Label: d.Get("label").(string),
}
asyncMode, ok := d.GetOk("delete_async_mode")
if ok && asyncMode.(bool) {
_, err := c.CloudAPI().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
d.SetId("")
@@ -160,6 +168,11 @@ func resourceSnapshotSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "timestamp",
},
"delete_async_mode": {
Type: schema.TypeBool,
Computed: true,
Description: "async mode",
},
}
}

View File

@@ -154,6 +154,47 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "mtu",
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -170,7 +211,7 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"node_id" : {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -178,6 +219,10 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"qos": {
Type: schema.TypeList,
Computed: true,

View File

@@ -83,6 +83,11 @@ func dataSourceVinsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by external IP address",
},
"vnf_dev_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by VNF Device id",
},
"include_deleted": {
Type: schema.TypeBool,
Optional: true,

View File

@@ -87,25 +87,28 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
res := make([]map[string]interface{}, 0, len(interfaces))
for _, vnfInterface := range interfaces {
temp := map[string]interface{}{
"conn_id": vnfInterface.ConnID,
"conn_type": vnfInterface.ConnType,
"def_gw": vnfInterface.DefGW,
"enabled": vnfInterface.Enabled,
"flipgroup_id": vnfInterface.FLIPGroupID,
"guid": vnfInterface.GUID,
"ip_address": vnfInterface.IPAddress,
"listen_ssh": vnfInterface.ListenSSH,
"mac": vnfInterface.MAC,
"name": vnfInterface.Name,
"net_id": vnfInterface.NetID,
"net_mask": vnfInterface.NetMask,
"net_type": vnfInterface.NetType,
"node_id": vnfInterface.NodeID,
"pci_slot": vnfInterface.PCISlot,
"qos": flattenQOS(vnfInterface.QOS),
"target": vnfInterface.Target,
"type": vnfInterface.Type,
"vnfs": vnfInterface.VNFs,
"conn_id": vnfInterface.ConnID,
"conn_type": vnfInterface.ConnType,
"def_gw": vnfInterface.DefGW,
"enabled": vnfInterface.Enabled,
"flipgroup_id": vnfInterface.FLIPGroupID,
"guid": vnfInterface.GUID,
"ip_address": vnfInterface.IPAddress,
"listen_ssh": vnfInterface.ListenSSH,
"mac": vnfInterface.MAC,
"mtu": vnfInterface.MTU,
"name": vnfInterface.Name,
"net_id": vnfInterface.NetID,
"net_mask": vnfInterface.NetMask,
"net_type": vnfInterface.NetType,
"node_id": vnfInterface.NodeID,
"pci_slot": vnfInterface.PCISlot,
"bus_number": vnfInterface.BusNumber,
"qos": flattenQOS(vnfInterface.QOS),
"target": vnfInterface.Target,
"type": vnfInterface.Type,
"vnfs": vnfInterface.VNFs,
"libvirt_settings": flattenLibvirtSettings(vnfInterface.LibvirtSettings),
}
res = append(res, temp)
}
@@ -113,6 +116,21 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
return res
}
func flattenLibvirtSettings(libvirtSettings vins.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}
func flattenVNFDev(vnfDev vins.RecordVNFDev) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{

View File

@@ -66,6 +66,10 @@ func utilityVinsListCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ExtIP = ext_ip.(string)
}
if VNFDevId, ok := d.GetOk("vnf_dev_id"); ok {
req.VNFDevId = uint64(VNFDevId.(int))
}
if includeDeleted, ok := d.GetOk("include_deleted"); ok {
req.IncludeDeleted = includeDeleted.(bool)
}

View File

@@ -49,11 +49,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
},
"recursive_delete": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},

View File

@@ -165,9 +165,8 @@ func utilityAccountUsersUpdate(ctx context.Context, d *schema.ResourceData, m in
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: acc.ID,
UserName: userConv["user_id"].(string),
})
if err != nil {
@@ -414,8 +413,7 @@ func isChangedUser(els []interface{}, el interface{}) bool {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string))) {
return true
}
}

View File

@@ -0,0 +1,92 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
"os"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAuditsToFileRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
filePath := "audits.tar.gz"
if userPath, ok := d.GetOk("file_path"); ok {
filePath = userPath.(string)
}
log.Debugf("dataSourceAuditsToFileRead: create file with name: %s", filePath)
file, err := os.Create(filePath)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
defer file.Close()
data, err := utilityAuditsToFileCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
log.Debugf("dataSourceAuditsToFileRead: write data to file with name: %s", filePath)
_, err = file.Write(data)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
return nil
}
func DataSourceAuditsToFile() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAuditsToFileRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAuditToFileSchemaMake(),
}
}

View File

@@ -41,7 +41,6 @@ import (
func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
log.Debugf("flattenAudit: decoded audit guid %s", d.Get("audit_guid").(string))
d.Set("apitask", au.Apitask)
d.Set("args", au.Arguments)
d.Set("call", au.Call)
d.Set("guid", au.GUID)

View File

@@ -10,10 +10,6 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Description: "audit guid",
},
"apitask": {
Type: schema.TypeString,
Computed: true,
},
"args": {
Type: schema.TypeString,
Computed: true,
@@ -211,3 +207,13 @@ func dataSourceLinkedJobsSchemaMake() map[string]*schema.Schema {
},
}
}
func dataSourceAuditToFileSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"file_path": {
Type: schema.TypeString,
Optional: true,
Description: "file path",
},
}
}

View File

@@ -0,0 +1,54 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityAuditsToFileCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]byte, error) {
c := m.(*controller.ControllerCfg)
log.Debugf("utilityAuditToFileCheckPresence: load audit file")
auditTar, err := c.CloudBroker().Audit().ExportAuditsToFile(ctx)
if err != nil {
return nil, err
}
return auditTar, nil
}

View File

@@ -264,7 +264,6 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
_, err = c.CloudBroker().Disks().Delete(ctx, req)
@@ -334,12 +333,6 @@ func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interfac
DiskID: uint64(d.Get("disk_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
return err
}

View File

@@ -177,7 +177,6 @@ func resourceDiskReplicationDelete(ctx context.Context, d *schema.ResourceData,
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d", diskId)

View File

@@ -207,27 +207,27 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -611,27 +611,27 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -1006,27 +1006,27 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -1859,12 +1859,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
@@ -2072,27 +2066,27 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2393,27 +2387,27 @@ func dataSourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2568,11 +2562,7 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Delete disk permanently",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for disk deletion",
},
"replica_disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -2771,27 +2761,27 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2889,4 +2879,4 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
}
return rets
}

View File

@@ -0,0 +1,72 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdk, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
flattenDPDKNet(d, dpdk)
return nil
}
func DataSourceDPDKNet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetSchemaMake(),
}
}

View File

@@ -0,0 +1,72 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdkList, err := utilityDPDKNetListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDPDKNetList(dpdkList))
d.Set("entry_count", dpdkList.EntryCount)
return nil
}
func DataSourceDPDKNetList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetListSchemaMake(),
}
}

View File

@@ -0,0 +1,44 @@
package dpdknet
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
)
func flattenDPDKNet(d *schema.ResourceData, dpdk *dpdk.RecordDPDKNet) {
d.Set("dpdk_id", dpdk.ID)
d.Set("account_access", dpdk.AccountAccess)
d.Set("created_time", dpdk.CreatedTime)
d.Set("desc", dpdk.Description)
d.Set("gid", dpdk.GID)
d.Set("guid", dpdk.GUID)
d.Set("name", dpdk.Name)
d.Set("rg_access", dpdk.RGAccess)
d.Set("status", dpdk.Status)
d.Set("ovs_bridge", dpdk.OVSBridge)
d.Set("vlan_id", dpdk.VlanID)
d.Set("compute_ids", dpdk.ComputeIDs)
d.Set("updated_time", dpdk.UpdatedTime)
}
func flattenDPDKNetList(list *dpdk.ListDPDKNet) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list.Data))
for _, dpdk := range list.Data {
temp := map[string]interface{}{
"dpdk_id": dpdk.ID,
"account_access": dpdk.AccountAccess,
"desc": dpdk.Description,
"gid": dpdk.GID,
"guid": dpdk.GUID,
"name": dpdk.Name,
"rg_access": dpdk.RGAccess,
"status": dpdk.Status,
"ovs_bridge": dpdk.OVSBridge,
"vlan_id": dpdk.VlanID,
"compute_ids": dpdk.ComputeIDs,
"updated_time": dpdk.UpdatedTime,
}
res = append(res, temp)
}
return res
}

View File

@@ -0,0 +1,24 @@
package dpdknet
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
gid := uint64(d.Get("gid").(int))
if err := ic.ExistGID(ctx, gid, c); err != nil {
errs = append(errs, err)
}
return dc.ErrorsToDiagnostics(errs)
}

View File

@@ -0,0 +1,246 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"strconv"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
)
func resourceDPDKNetCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetCreate: called for DPDK network %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
req := dpdk.CreateRequest{
Name: d.Get("name").(string),
GID: uint64(d.Get("gid").(int)),
VlanID: uint64(d.Get("vlan_id").(int)),
OVSBridge: d.Get("ovs_bridge").(string),
}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
dpdkID, err := c.CloudBroker().DPDKNet().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(dpdkID, 10))
d.Set("dpdk_id", dpdkID)
warnings := dc.Warnings{}
if err = utilityDPDKNetEnabled(ctx, d, m); err != nil {
warnings.Add(err)
}
return append(warnings.Get(), resourceDPDKNetRead(ctx, d, m)...)
}
func resourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetRead: called for pdpk_id %d", d.Get("dpdk_id").(int))
w := dc.Warnings{}
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
log.Debugf("status: %s", dpdkItem.Status)
switch dpdkItem.Status {
case status.Destroyed, status.Purged:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be read because it has been destroyed")
case status.Deleted:
case status.Assigned:
case status.Modeled:
return diag.Errorf("The DPDK network is in status: %s, please, contact support for more information", dpdkItem.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
flattenDPDKNet(d, dpdkItem)
return w.Get()
}
func resourceDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetUpdate: called for dpdk_id %d", d.Get("dpdk_id").(int))
c := m.(*controller.ControllerCfg)
w := dc.Warnings{}
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch dpdkItem.Status {
case status.Destroyed, status.Purged:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
case status.Deleted:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been deleted")
case status.Assigned:
case status.Modeled:
return diag.Errorf("The DPDK network is in status: %s, please, contact support for more information", dpdkItem.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
if d.HasChange("enabled") {
if err := utilityDPDKNetEnabled(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "desc", "vlan_id", "ovs_bridge", "account_access", "rg_access") {
if err := utilityDPDKNetUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(w.Get(), resourceDPDKNetRead(ctx, d, m)...)
}
func resourceDPDKNetDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetDelete: called for dpdk_id %d", d.Get("dpdk_id").(int))
c := m.(*controller.ControllerCfg)
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
req := dpdk.DeleteRequest{
DPDKID: dpdkItem.ID,
}
if d.Get("enabled") == true {
req := dpdk.DisableRequest{
DPDKID: dpdkItem.ID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return diag.FromErr(err)
}
}
_, err = c.CloudBroker().DPDKNet().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func ResourceDPDKNet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceDPDKNetCreate,
ReadContext: resourceDPDKNetRead,
UpdateContext: resourceDPDKNetUpdate,
DeleteContext: resourceDPDKNetDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChange("enable") {
diff.SetNewComputed("status")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_time")
}
return nil
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: resourceDPDKNetSchemaMake(),
}
}

View File

@@ -0,0 +1,312 @@
package dpdknet
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
func dataSourceDPDKNetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
}
return res
}
func dataSourceDPDKNetListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"gid": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by GID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "Find by description",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"compute_ids": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Find by compute IDs",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func resourceDPDKNetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the grid (platform)",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of network",
},
"ovs_bridge": {
Type: schema.TypeString,
Required: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Required: true,
Description: "vlan ID",
},
"dpdk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Description of DPDK network",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "Enabled or disabled DPDK network",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"rg_access": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
}
return res
}

View File

@@ -0,0 +1,166 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDPDKNetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.RecordDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.GetRequest{}
if d.Get("dpdk_id") != nil {
if d.Get("dpdk_id").(int) == 0 {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
} else {
req.DPDKID = uint64(d.Get("dpdk_id").(int))
}
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
}
log.Debugf("utilityDPDKCheckPresence: get DPDK network")
dpdk, err := c.CloudBroker().DPDKNet().Get(ctx, req)
if err != nil {
return nil, err
}
return dpdk, nil
}
func utilityDPDKNetEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
dpdkID, _ := strconv.ParseUint(d.Id(), 10, 64)
enabled := d.Get("enabled").(bool)
if enabled {
req := dpdk.EnableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Enable(ctx, req); err != nil {
return err
}
} else {
req := dpdk.DisableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return err
}
}
log.Debugf("resourceDPDKNetUpdate: enable=%v DPDK Network ID %s after completing its resource configuration", enabled, d.Id())
return nil
}
func utilityDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
dpdkID, _ := strconv.ParseUint(d.Id(), 10, 64)
req := dpdk.UpdateRequest{
DPDKID: dpdkID,
}
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("desc") {
req.Description = d.Get("desc").(string)
}
if d.HasChange("vlan_id") {
req.VlanID = uint64(d.Get("vlan_id").(int))
}
if d.HasChange("ovs_bridge") {
req.OVSBridge = d.Get("ovs_bridge").(string)
}
if d.HasChange("account_access") {
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
}
if d.HasChange("rg_access") {
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
}
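// if the network is currently enabled, temporarily disable it before applying the update; it is re-enabled below once the update has been sent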
if d.Get("enabled") == true {
req := dpdk.DisableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return err
}
}
if _, err := c.CloudBroker().DPDKNet().Update(ctx, req); err != nil {
return err
}
if d.Get("enabled") == true {
req := dpdk.EnableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Enable(ctx, req); err != nil {
return err
}
}
return nil
}
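A rough sketch of how the two helpers above could be combined in the resource update handler that the log messages refer to; the handler and read-function names are assumptions for illustration only:

    func resourceDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
        // apply field changes first (utilityDPDKNetUpdate handles the disable/re-enable dance itself)
        if d.HasChanges("name", "desc", "vlan_id", "ovs_bridge", "account_access", "rg_access") {
            if err := utilityDPDKNetUpdate(ctx, d, m); err != nil {
                return diag.FromErr(err)
            }
        }
        // then reconcile the enabled flag separately
        if d.HasChange("enabled") {
            if err := utilityDPDKNetEnabled(ctx, d, m); err != nil {
                return diag.FromErr(err)
            }
        }
        return resourceDPDKNetRead(ctx, d, m) // assumed read handler
    }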

View File

@@ -0,0 +1,108 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityDPDKNetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.ListDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.ListRequest{}
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}
if GID, ok := d.GetOk("gid"); ok {
req.GID = uint64(GID.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if vlanID, ok := d.GetOk("vlan_id"); ok {
req.VlanID = uint64(vlanID.(int))
}
if computeIDs, ok := d.GetOk("compute_ids"); ok {
IDs := computeIDs.([]interface{})
for _, ID := range IDs {
req.ComputeIDs = append(req.ComputeIDs, uint64(ID.(int)))
}
}
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDPDKListCheckPresence: load DPDK network list")
dpdkList, err := c.CloudBroker().DPDKNet().List(ctx, req)
if err != nil {
return nil, err
}
return dpdkList, nil
}
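As a point of reference, a list helper like the one above is typically consumed by a data-source read function shaped like dataSourceGridGetSettingsRead later in this commit; the function and flatten-helper names below are assumptions:

    func dataSourceDPDKNetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
        dpdkList, err := utilityDPDKNetListCheckPresence(ctx, d, m)
        if err != nil {
            d.SetId("")
            return diag.FromErr(err)
        }
        d.SetId(uuid.New().String())
        flattenDPDKNetList(d, dpdkList) // assumed flatten helper
        return nil
    }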

View File

@@ -0,0 +1,69 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridGetSettingsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridSettings, err := utilityGridGetSettingsCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
flattenGridSettings(d, gridSettings)
return nil
}
func DataSourceGridGetSettings() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridGetSettingsRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGridGetSettingsSchemaMake(),
}
}

View File

@@ -1,12 +1,16 @@
package grid
import (
"encoding/json"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) {
d.Set("ckey", grid.CKey)
d.Set("meta", flattens.FlattenMeta(grid.Meta))
d.Set("auth_broker", flattens.FlattenMeta(grid.AuthBroker))
d.Set("name", grid.Name)
d.Set("flag", grid.Flag)
@@ -88,3 +92,83 @@ func flattenGridSeps(seps map[string]map[string]grid.DiskUsage) []map[string]int
}
return res
}
func flattenGridSettings(d *schema.ResourceData, gridSettings *grid.RecordSettingsGrid) {
limits, _ := json.Marshal(gridSettings.Limits)
d.Set("allowed_ports", gridSettings.Allowedports)
d.Set("cleanup_retention_period", gridSettings.CleanupRetentionPeriod)
d.Set("docker_registry", flattenDockerRegistry(gridSettings.DockerRegistry))
d.Set("enable_uptime_monitor", gridSettings.EnableUptimeMonitor)
d.Set("extnet_max_pre_reservations_num", gridSettings.ExtnetMaxPreReservationsNum)
d.Set("healthcheck_notifications", flattenHealthcheckNotifications(gridSettings.HealthcheckNotifications))
d.Set("k8s_cleanup_enabled", gridSettings.K8sCleanupEnabled)
d.Set("limits", string(limits))
d.Set("location_url", gridSettings.LocationURL)
d.Set("net_qos", flattenNetQOS(gridSettings.NetQOS))
d.Set("networks", gridSettings.Networks)
d.Set("prometheus", flattenPrometheus(gridSettings.Prometheus))
d.Set("vins_max_pre_reservations_num", gridSettings.VinsMaxPreReservationsNum)
d.Set("vnfdev_mgmt_net_range", gridSettings.VnfdevMgmtNetRange)
}
func flattenDockerRegistry(dr grid.DockerRegistry) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"password": dr.Password,
"server": dr.Server,
"username": dr.Username,
}
res = append(res, temp)
return res
}
func flattenHealthcheckNotifications(hn grid.HealthcheckNotifications) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"emails": flattenEmails(hn.Emails),
}
res = append(res, temp)
return res
}
func flattenEmails(emails []grid.Emails) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(emails))
for _, email := range emails {
temp := map[string]interface{}{
"address": email.Address,
"enabled": email.Enabled,
}
res = append(res, temp)
}
return res
}
func flattenNetQOS(netQOS grid.NetQOS) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"extnet": flattenSettingsNetQOS(netQOS.ExtNet),
"vins": flattenSettingsNetQOS(netQOS.VINS),
}
res = append(res, temp)
return res
}
func flattenSettingsNetQOS(qos grid.SettingsNetQOS) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"e_rate": qos.ERate,
"in_burst": qos.InBurst,
"in_rate": qos.InRate,
}
res = append(res, temp)
return res
}
func flattenPrometheus(pr grid.Prometheus) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"scrape_interval": pr.ScrapeInterval,
}
res = append(res, temp)
return res
}
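Since flattenGridSettings stores the limits structure as a JSON string, a consumer of the data source has to decode it again. A minimal sketch, assuming d is the usual *schema.ResourceData inside a read handler:

    var limits map[string]interface{}
    if raw := d.Get("limits").(string); raw != "" {
        if err := json.Unmarshal([]byte(raw), &limits); err != nil {
            return diag.FromErr(err) // or log and continue, depending on the caller
        }
    }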

View File

@@ -8,6 +8,18 @@ func dataSourceGetGridSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"auth_broker": {
Type: schema.TypeList,
Computed: true,
@@ -593,3 +605,160 @@ func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema {
},
}
}
func dataSourceGridGetSettingsSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Description: "grid (platform) ID",
Required: true,
},
"allowed_ports": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"cleanup_retention_period": {
Type: schema.TypeInt,
Computed: true,
},
"docker_registry": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"password": {
Type: schema.TypeString,
Computed: true,
},
"server": {
Type: schema.TypeString,
Computed: true,
},
"username": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"enable_uptime_monitor": {
Type: schema.TypeBool,
Computed: true,
},
"extnet_max_pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"healthcheck_notifications": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"emails": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Computed: true,
},
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
},
},
},
"k8s_cleanup_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"limits": {
Type: schema.TypeString,
Computed: true,
},
"location_url": {
Type: schema.TypeString,
Computed: true,
},
"net_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"extnet": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"networks": {
Type: schema.TypeString,
Computed: true,
},
"prometheus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"scrape_interval": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"vins_max_pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"vnfdev_mgmt_net_range": {
Type: schema.TypeString,
Computed: true,
},
}
}

View File

@@ -0,0 +1,63 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityGridGetSettingsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordSettingsGrid, error) {
c := m.(*controller.ControllerCfg)
req := grid.GetSettingsRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.GID = id
} else {
req.GID = uint64(d.Get("grid_id").(int))
}
log.Debugf("utilityGridutilityGridGetSettingsCheckPresenceCheckPresence: load grid settings")
gridSettingsRec, err := c.CloudBroker().Grid().GetSettings(ctx, req)
if err != nil {
return nil, err
}
return gridSettingsRec, nil
}

View File

@@ -8,6 +8,7 @@ import (
cb_account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
cb_compute "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
cb_disks "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
cb_dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
@@ -284,6 +285,39 @@ func ExistVFPools(ctx context.Context, vfpoolIds []uint64, c *controller.Control
return errs
}
func ExistDPDKNet(ctx context.Context, dpdkIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
if len(dpdkIds) == 0 {
return errs
}
req := cb_dpdk.ListRequest{}
dpdkList, err := c.CloudBroker().DPDKNet().List(ctx, req)
if err != nil {
errs = append(errs, err)
return errs
}
for _, dpdkId := range dpdkIds {
found := false
for _, dpdk := range dpdkList.Data {
if dpdkId == dpdk.ID {
found = true
break
}
}
if !found {
errs = append(errs, fmt.Errorf("DPDKNet with ID %v not found", dpdkId))
}
}
return errs
}
func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
if extNetId == 0 {
return nil

View File

@@ -130,10 +130,6 @@ func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m int
ImageID: imageData.ID,
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -141,13 +141,6 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -179,13 +179,6 @@ func resourceImageFromBlankComputeDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -186,13 +186,6 @@ func resourceImageFromPlatformDiskDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -99,13 +99,6 @@ func resourceVirtualImageDelete(ctx context.Context, d *schema.ResourceData, m i
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -407,6 +407,11 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "find by enabled True or False",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -458,7 +463,7 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -716,7 +721,7 @@ func dataSourceImageSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -922,7 +927,7 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Required: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
@@ -968,13 +973,7 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
Description: "binary architecture of this image, one of X86_64",
},
"bootable": {
Type: schema.TypeBool,
@@ -1269,12 +1268,7 @@ func resourceImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Description: "Whether to completely delete the image",
Description: "binary architecture of this image, one of X86_64",
},
"bootable": {
Type: schema.TypeBool,
@@ -1313,10 +1307,7 @@ func resourceImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,
@@ -1534,15 +1525,7 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Description: "Whether to completely delete the image",
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
@@ -1595,7 +1578,7 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -1801,12 +1784,6 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
@@ -1838,10 +1815,7 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,
@@ -2039,7 +2013,7 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "Image type linux, windows or other",
},
@@ -2069,11 +2043,10 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Required: true,
Elem: &schema.Schema{
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
@@ -2096,12 +2069,6 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
@@ -2133,10 +2100,7 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,

View File

@@ -72,13 +72,13 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if public, ok := d.GetOk("public"); ok {
if public, ok := d.GetOkExists("public"); ok {
req.Public = public.(bool)
}
if hotResize, ok := d.GetOk("hot_resize"); ok {
if hotResize, ok := d.GetOkExists("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}
if bootable, ok := d.GetOk("bootable"); ok {
if bootable, ok := d.GetOkExists("bootable"); ok {
req.Bootable = bootable.(bool)
}
if sortBy, ok := d.GetOk("sort_by"); ok {
@@ -90,7 +90,9 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
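// GetOkExists, used here and for the boolean filters above, still reports a value that was explicitly set to false (GetOk would treat it as unset and skip the filter)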
if enabled, ok := d.GetOkExists("enabled"); ok {
req.Enabled = enabled.(bool)
}
log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudBroker().Image().List(ctx, req)
if err != nil {

View File

@@ -48,14 +48,6 @@ func SyncCreateRequest(ctx context.Context, d *schema.ResourceData, m interface{
ImageType: d.Get("image_type").(string),
}
c := m.(*controller.ControllerCfg)
if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil {
return req, err
}
req.GID = uint64(d.Get("gid").(int))
if _, ok := d.GetOk("drivers"); ok {
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {

View File

@@ -111,14 +111,16 @@ func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{
d.Set("kubeconfig", kubeconfig)
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudBroker().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
if cluster.LBID != 0 {
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudBroker().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
}
flattenK8sData(d, cluster, masterComputeList, workersComputeList)
return nil

View File

@@ -64,6 +64,9 @@ func flattenResourceK8sCP(d *schema.ResourceData, k8s k8s.RecordK8S, masters []c
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
d.Set("network_plugin", k8s.NetworkPlugin)
d.Set("highly_available_lb", k8s.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(k8s.AddressVIP))
d.Set("extnet_only", k8s.ExtnetOnly)
flattenCPParams(d, k8s.K8SGroups.Masters, masters)
}
@@ -100,6 +103,20 @@ func flattenK8sData(d *schema.ResourceData, cluster *k8s.RecordK8S, masters []co
d.Set("tech_status", cluster.TechStatus)
d.Set("updated_by", cluster.UpdatedBy)
d.Set("updated_time", cluster.UpdatedTime)
d.Set("highly_available_lb", cluster.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(cluster.AddressVIP))
d.Set("extnet_only", cluster.ExtnetOnly)
d.Set("with_lb", cluster.WithLB)
}
func flattenAddressVIP(addressVIP k8s.K8SAddressVIP) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"backend_ip": addressVIP.BackendIP,
"frontend_ip": addressVIP.FrontendIP,
}
res = append(res, temp)
return res
}
func flattenAcl(acl k8s.RecordACLGroup) []map[string]interface{} {

View File

@@ -158,6 +158,10 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
createReq.OidcCertificate = oidcCertificate.(string)
}
if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}
if extNetOnly, ok := d.GetOk("extnet_only"); ok {
createReq.ExtNetOnly = extNetOnly.(bool)
}
@@ -475,10 +479,10 @@ func handleUpdateNum(ctx context.Context, d *schema.ResourceData, c *controller.
oldVal, newVal := d.GetChange("num")
if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
ids := make([]uint64, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := k8sData.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
ids = append(ids, id)
}
req := k8s.DeleteMasterFromGroupRequest{

View File

@@ -65,6 +65,7 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
}
if d.Get("disk") == nil {
@@ -187,6 +188,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
Chipset: d.Get("chipset").(string),
}
_, err := c.CloudBroker().K8S().WorkerAdd(ctx, req)

View File

@@ -1439,6 +1439,11 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"lb_sysctl_params": {
Type: schema.TypeList,
Optional: true,
@@ -1771,6 +1776,11 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,

View File

@@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {

View File

@@ -38,6 +38,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("boot_disk_id", bootDisk.ID)
// we intentionally use the SizeMax field, do not change it until the BootDiskSize field is fixed on the platform
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("chipset", computeRec.Chipset)
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
@@ -156,31 +157,49 @@ func flattenInterfaces(ifaces compute.ListInterfaces) []map[string]interface{} {
for _, iface := range ifaces {
res = append(res, map[string]interface{}{
"conn_id": iface.ConnID,
"conn_type": iface.ConnType,
"def_gw": iface.DefGW,
"enabled": iface.Enabled,
"flip_group_id": iface.FLIPGroupID,
"guid": iface.GUID,
"ip_address": iface.IPAddress,
"listen_ssh": iface.ListenSSH,
"mac": iface.MAC,
"name": iface.Name,
"net_id": iface.NetID,
"netmask": iface.NetMask,
"net_type": iface.NetType,
"node_id": iface.NodeID,
"pci_slot": iface.PCISlot,
"qos": flattenQOS(iface.QOS),
"target": iface.Target,
"type": iface.Type,
"vnfs": iface.VNFs,
"bus_number": iface.BusNumber,
"conn_id": iface.ConnID,
"conn_type": iface.ConnType,
"def_gw": iface.DefGW,
"enabled": iface.Enabled,
"flip_group_id": iface.FLIPGroupID,
"guid": iface.GUID,
"ip_address": iface.IPAddress,
"listen_ssh": iface.ListenSSH,
"mac": iface.MAC,
"mtu": iface.MTU,
"name": iface.Name,
"net_id": iface.NetID,
"netmask": iface.NetMask,
"net_type": iface.NetType,
"node_id": iface.NodeID,
"pci_slot": iface.PCISlot,
"qos": flattenQOS(iface.QOS),
"target": iface.Target,
"type": iface.Type,
"vnfs": iface.VNFs,
"libvirt_settings": flattenLibvirtSettings(iface.LibvirtSettings),
})
}
return res
}
func flattenLibvirtSettings(libvirtSettings compute.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}
func flattenQOS(qos compute.QOS) []map[string]interface{} {
return []map[string]interface{}{
{
@@ -275,6 +294,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
@@ -406,8 +426,9 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
"bus_number": disk.BusNumber,
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}
@@ -593,6 +614,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
d.Set("arch", compFacts.Arch)
d.Set("boot_order", compFacts.BootOrder)
d.Set("chipset", compFacts.Chipset)
d.Set("cd_image_id", compFacts.CdImageId)
d.Set("clone_reference", compFacts.CloneReference)
d.Set("clones", compFacts.Clones)
@@ -708,6 +730,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"meta": flattens.FlattenMeta(disk.Meta),
"account_id": disk.AccountID,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,

View File

@@ -2,7 +2,6 @@ package kvmvm
import (
"context"
"errors"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -37,7 +36,7 @@ func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *contro
func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) []error {
var errs []error
var vinsIds, extNetIds, vfpoolIds []uint64
var vinsIds, extNetIds, vfpoolIds, dpdkIds []uint64
networksIface, ok := d.GetOk("network")
if !ok {
@@ -54,10 +53,9 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
case "EXTNET":
extNetIds = append(extNetIds, uint64(network["net_id"].(int)))
case "VFNIC":
if d.Get("driver").(string) == "KVM_PPC" {
errs = append(errs, errors.New("'VFNIC' net_type is not allowed for driver 'KVM_PPC'"))
}
vfpoolIds = append(vfpoolIds, uint64(network["net_id"].(int)))
case "DPDK":
dpdkIds = append(dpdkIds, uint64(network["net_id"].(int)))
default:
continue
}
@@ -75,5 +73,9 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
errs = append(errs, vfpoolErrs...)
}
if dpdkErrs := ic.ExistDPDKNet(ctx, dpdkIds, c); dpdkErrs != nil {
errs = append(errs, dpdkErrs...)
}
return errs
}

View File

@@ -42,7 +42,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
@@ -55,59 +54,48 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg)
createReqX86 := kvmx86.CreateRequest{}
createReqPPC := kvmppc.CreateRequest{}
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
if desc, ok := d.GetOk("description"); ok {
createReqPPC.Description = desc.(string)
createReqX86.Description = desc.(string)
}
if sepID, ok := d.GetOk("sep_id"); ok {
createReqPPC.SEPID = uint64(sepID.(int))
createReqX86.SEPID = uint64(sepID.(int))
}
if pool, ok := d.GetOk("pool"); ok {
createReqPPC.Pool = pool.(string)
createReqX86.Pool = pool.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
createReqPPC.StackID = uint64(stackID.(int))
createReqX86.StackID = uint64(stackID.(int))
}
if start, ok := d.GetOk("started"); ok {
createReqPPC.Start = start.(bool)
createReqX86.Start = start.(bool)
}
if ipaType, ok := d.GetOk("ipa_type"); ok {
createReqPPC.IPAType = ipaType.(string)
createReqX86.IPAType = ipaType.(string)
}
if bootSize, ok := d.GetOk("boot_disk_size"); ok {
createReqPPC.BootDisk = uint64(bootSize.(int))
createReqX86.BootDisk = uint64(bootSize.(int))
}
if IS, ok := d.GetOk("is"); ok {
createReqPPC.IS = IS.(string)
createReqX86.IS = IS.(string)
}
createReqX86.Interfaces = make([]kvmx86.Interface, 0)
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
if networks.(*schema.Set).Len() > 0 {
ns := networks.(*schema.Set).List()
log.Debugf("BEFORE SORT %v", ns)
sort.Slice(ns, func(i, j int) bool {
weightI := ns[i].(map[string]interface{})["weight"].(int)
weightJ := ns[j].(map[string]interface{})["weight"].(int)
@@ -119,9 +107,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
return weightI < weightJ
})
log.Debugf("AFTER SORT %v", ns)
interfacesX86 := make([]kvmx86.Interface, 0)
interfacesPPC := make([]kvmppc.Interface, 0)
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
@@ -139,28 +125,11 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
createReqX86.Interfaces = interfacesX86
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
reqInterface := kvmppc.Interface{
NetType: netInterfaceVal["net_type"].(string),
NetID: uint64(netInterfaceVal["net_id"].(int)),
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
}
interfacesPPC = append(interfacesPPC, reqInterface)
}
createReqPPC.Interfaces = interfacesPPC
}
}
if disks, ok := d.GetOk("disks"); ok {
disksX86 := make([]kvmx86.DataDisk, 0)
disksPPC := make([]kvmppc.DataDisk, 0)
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
@@ -185,95 +154,57 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.DataDisks = disksX86
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
reqDataDisk := kvmppc.DataDisk{
DiskName: diskVal["disk_name"].(string),
Size: uint64(diskVal["size"].(int)),
}
if sepId, ok := diskVal["sep_id"]; ok {
reqDataDisk.SepID = uint64(sepId.(int))
}
if pool, ok := diskVal["pool"]; ok {
reqDataDisk.Pool = pool.(string)
}
if desc, ok := diskVal["desc"]; ok {
reqDataDisk.Description = desc.(string)
}
if imageID, ok := diskVal["image_id"]; ok {
reqDataDisk.ImageID = uint64(imageID.(int))
}
disksPPC = append(disksPPC, reqDataDisk)
}
createReqPPC.DataDisks = disksPPC
}
if cloudInit, ok := d.GetOk("cloud_init"); ok {
userdata := cloudInit.(string)
if userdata != "" && userdata != "applied" {
createReqPPC.Userdata = strings.TrimSpace(userdata)
createReqX86.Userdata = strings.TrimSpace(userdata)
}
}
var computeId uint64
driver := d.Get("driver").(string)
if driver == "KVM_PPC" {
createReqPPC.RGID = uint64(d.Get("rg_id").(int))
createReqPPC.Name = d.Get("name").(string)
createReqPPC.CPU = uint64(d.Get("cpu").(int))
createReqPPC.RAM = uint64(d.Get("ram").(int))
createReqPPC.ImageID = uint64(d.Get("image_id").(int))
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
apiResp, err := c.CloudBroker().KVMPPC().Create(ctx, createReqPPC)
if err != nil {
return diag.FromErr(err)
}
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
} else {
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
createReqX86.Driver = driver
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomField = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
createReqX86.Driver = driver
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomField = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
createReqX86.Chipset = d.Get("chipset").(string)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
warnings := dc.Warnings{}
@@ -285,9 +216,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
Permanently: true,
DetachDisks: true,
}
if reason, ok := d.Get("reason").(string); ok {
req.Reason = reason
}
if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil {
log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err)
}
@@ -439,9 +368,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if int64(pfwItem["public_port_end"].(int)) != 0 {
req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
if err != nil {
warnings.Add(err)
@@ -499,9 +426,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
ComputeID: computeId,
CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
}
if snapshotItem["reason"].(string) != "" {
req.Reason = snapshotItem["reason"].(string)
}
_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
if err != nil {
warnings.Add(err)
@@ -560,6 +485,31 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
if ars, ok := d.GetOk("libvirt_settings"); ok {
log.Debugf("resourceComputeCreate: Configure libvirt virtio interface parameters on ComputeID: %d", computeId)
settings := ars.(*schema.Set).List()
if len(settings) > 0 {
for _, v := range settings {
settingsConv := v.(map[string]interface{})
req := compute.SetNetConfigRequest{
ComputeID: computeId,
MAC: settingsConv["mac"].(string),
TXMode: settingsConv["txmode"].(string),
IOEventFD: settingsConv["ioeventfd"].(string),
EventIDx: settingsConv["event_idx"].(string),
Queues: uint64(settingsConv["queues"].(int)),
RXQueueSize: uint64(settingsConv["rx_queue_size"].(int)),
TXQueueSize: uint64(settingsConv["tx_queue_size"].(int)),
}
_, err := c.CloudBroker().Compute().SetNetConfig(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -823,6 +773,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("libvirt_settings") {
if err := utilityComputeUpdateLibvirtSettings(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(resourceComputeRead(ctx, d, m), warnings.Get()...)
}

View File

@@ -15,11 +15,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Get compute by id",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"account_id": {
Type: schema.TypeInt,
@@ -146,6 +141,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"cd_image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -225,6 +224,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -554,6 +557,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -590,6 +597,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -614,6 +625,7 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"qos": {
Type: schema.TypeList,
Computed: true,
@@ -638,6 +650,42 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,
@@ -932,6 +980,16 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by IP address",
},
"stack_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by stack ID",
},
"image_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by image ID",
},
"extnet_name": {
Type: schema.TypeString,
Optional: true,
@@ -1098,6 +1156,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"cd_image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1158,6 +1220,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1198,6 +1264,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1234,6 +1304,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -1282,6 +1356,42 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,
@@ -2153,10 +2263,6 @@ func dataSourceComputeGetAuditsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"items": {
Type: schema.TypeList,
@@ -2196,10 +2302,6 @@ func dataSourceComputePfwListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"items": {
Type: schema.TypeList,
@@ -2792,7 +2894,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
@@ -2815,6 +2917,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "ID of the OS image to base this compute instance on.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Type of the emulated system.",
},
"without_boot_disk": {
Type: schema.TypeBool,
Optional: true,
@@ -2900,7 +3008,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC", "DPDK"}, false), // observe case while validating
Description: "Type of the network for this connection, either EXTNET or VINS.",
},
@@ -2934,11 +3042,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"affinity_label": {
Type: schema.TypeString,
Optional: true,
@@ -2981,6 +3085,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"delete_async_mode": {
Type: schema.TypeBool,
Computed: true,
Description: "async mode",
},
"anti_affinity_rules": {
Type: schema.TypeList,
Optional: true,
@@ -3092,11 +3201,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3152,11 +3256,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3211,11 +3310,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3317,6 +3411,43 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
Description: "ID of the connected pci devices",
},
"libvirt_settings": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"mac": {
Type: schema.TypeString,
Required: true,
},
"txmode": {
Type: schema.TypeString,
Optional: true,
},
"ioeventfd": {
Type: schema.TypeString,
Optional: true,
},
"event_idx": {
Type: schema.TypeString,
Optional: true,
},
"queues": {
Type: schema.TypeInt,
Optional: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Optional: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Optional: true,
},
},
},
Description: "Configure libvirt virtio interface parameters. You can only delete values locally. Data on the platform cannot be deleted.",
},
// Computed properties
"account_id": {
Type: schema.TypeInt,
@@ -3436,6 +3567,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -3472,6 +3607,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -3520,6 +3659,42 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,

View File

@@ -532,10 +532,6 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ComputeID = uint64(d.Get("compute_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
res, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return nil, err
@@ -586,13 +582,16 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
oldSet, newSet := d.GetChange("network")
oldList := oldSet.(*schema.Set).List()
newList := newSet.(*schema.Set).List()
detachMap, changeIpMap, attachMap := differenceNetwork(oldList, newList)
apiErrCount := 0
var lastSavedError error
detachSet := oldSet.(*schema.Set).Difference(newSet.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detachSet.Len(), d.Id())
for _, runner := range detachSet.List() {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(detachMap), d.Id())
for _, netData := range detachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetDetachRequest{
ComputeID: computeId,
@@ -609,21 +608,40 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(changeIpMap), d.Id())
for _, netData := range changeIpMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.ChangeIPRequest{
ComputeID: computeId,
NetType: netData["net_type"].(string),
NetID: uint64(netData["net_id"].(int)),
IPAddr: netData["ip_address"].(string),
}
_, err := c.CloudBroker().Compute().ChangeIP(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to change net ID %d of type %s from Compute ID %s: %s",
netData["net_id"].(int), netData["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || oldSet.(*schema.Set).Len() < 1 {
if oldSet.(*schema.Set).Len() == len(detachMap) || oldSet.(*schema.Set).Len() == 0 {
if err := utilityComputeStop(ctx, d, m); err != nil {
apiErrCount++
lastSavedError = err
}
needStart = true
if start := d.Get("started"); start.(bool) {
needStart = true
}
}
attachSet := newSet.(*schema.Set).Difference(oldSet.(*schema.Set))
attachList := attachSet.List()
sort.Slice(attachList, func(i, j int) bool {
weightI := attachList[i].(map[string]interface{})["weight"].(int)
weightJ := attachList[j].(map[string]interface{})["weight"].(int)
sort.Slice(attachMap, func(i, j int) bool {
weightI := attachMap[i]["weight"].(int)
weightJ := attachMap[j]["weight"].(int)
if weightI == 0 {
return false
}
@@ -632,9 +650,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
return weightI < weightJ
})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attachSet.Len(), d.Id())
for _, runner := range attachList {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(attachMap), d.Id())
for _, netData := range attachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetAttachRequest{
ComputeID: computeId,
@@ -672,6 +689,53 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
return nil
}
func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap, attachMap []map[string]interface{}) {
attachMap = make([]map[string]interface{}, 0)
changeIpMap = make([]map[string]interface{}, 0)
detachMap = make([]map[string]interface{}, 0)
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
found := false
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if (newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"] {
changeIpMap = append(changeIpMap, newMap)
found = true
break
} else if newMap["ip_address"] == oldMap["ip_address"] {
found = true
break
}
}
}
if found {
continue
}
detachMap = append(detachMap, oldMap)
}
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
found := false
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if newMap["ip_address"] == oldMap["ip_address"] || ((newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"]) {
found = true
break
}
}
}
if found {
continue
}
attachMap = append(attachMap, newMap)
}
return
}
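// For illustration only (hypothetical values, not part of the provider code): given
//   old: [{net_type: "VINS", net_id: 5, weight: 10, ip_address: "10.0.0.5"}]
//   new: [{net_type: "VINS", net_id: 5, weight: 10, ip_address: "10.0.0.6"},
//         {net_type: "EXTNET", net_id: 13, weight: 20, ip_address: ""}]
// differenceNetwork returns an empty detachMap, a changeIpMap holding the VINS entry with
// the new IP address, and an attachMap holding the EXTNET entry.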
func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
@@ -697,6 +761,10 @@ func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interfa
req.HPBacked = d.Get("hp_backed").(bool)
}
if d.HasChange("chipset") {
req.Chipset = d.Get("chipset").(string)
}
// Nota bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for a compute in STARTED tech status.
// If STARTED, we need to stop it before update
var isStopRequired bool
@@ -947,6 +1015,36 @@ func utilityComputeUpdatePciDevices(ctx context.Context, d *schema.ResourceData,
return nil
}
func utilityComputeUpdateLibvirtSettings(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("libvirt_settings")
added := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
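// only entries that are new or changed in libvirt_settings appear in the set difference
// and require a SetNetConfig call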
if len(added) > 0 {
for _, v := range added {
settingsConv := v.(map[string]interface{})
req := compute.SetNetConfigRequest{
ComputeID: computeId,
MAC: settingsConv["mac"].(string),
TXMode: settingsConv["txmode"].(string),
IOEventFD: settingsConv["ioeventfd"].(string),
EventIDx: settingsConv["event_idx"].(string),
Queues: uint64(settingsConv["queues"].(int)),
RXQueueSize: uint64(settingsConv["rx_queue_size"].(int)),
TXQueueSize: uint64(settingsConv["tx_queue_size"].(int)),
}
_, err := c.CloudBroker().Compute().SetNetConfig(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
@@ -1013,9 +1111,6 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
} else {
req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
_, err := c.CloudBroker().Compute().PFWDel(ctx, req)
if err != nil {
@@ -1037,9 +1132,6 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
if pfwItem["local_port"].(int) != 0 {
req.LocalBasePort = uint64(pfwItem["local_port"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
if err != nil {
@@ -1136,9 +1228,17 @@ func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m
Label: snapshotItem["label"].(string),
}
_, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
if err != nil {
return err
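// if snapshot_delete_async is set, use the asynchronous snapshot deletion endpoint;
// otherwise delete the snapshot synchronously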
asyncMode, ok := d.GetOk("snapshot_delete_async")
if ok && asyncMode.(bool) {
_, err := c.CloudBroker().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return err
}
} else {
_, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
if err != nil {
return err
}
}
}
}
@@ -1218,9 +1318,6 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
req := compute.CDEjectRequest{
ComputeID: computeId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDEject(ctx, req)
if err != nil {
@@ -1235,9 +1332,6 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
ComputeID: computeId,
CDROMID: uint64(cdItem["cdrom_id"].(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
if err != nil {

View File

@@ -46,10 +46,6 @@ func utilityComputeGetAuditsCheckPresence(ctx context.Context, d *schema.Resourc
ComputeID: uint64(d.Get("compute_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
computeAudits, err := c.CloudBroker().Compute().GetAudits(ctx, req)
if err != nil {
return nil, err

View File

@@ -69,6 +69,12 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
if ipAddress, ok := d.GetOk("ip_address"); ok {
req.IPAddress = ipAddress.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
req.StackID = uint64(stackID.(int))
}
if imageID, ok := d.GetOk("image_id"); ok {
req.ImageID = uint64(imageID.(int))
}
if extNetName, ok := d.GetOk("extnet_name"); ok {
req.ExtNetName = extNetName.(string)
}

View File

@@ -46,10 +46,6 @@ func utilityComputePfwListCheckPresence(ctx context.Context, d *schema.ResourceD
ComputeID: uint64(d.Get("compute_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
listPFWs, err := c.CloudBroker().Compute().PFWList(ctx, req)
if err != nil {
return nil, err

View File

@@ -58,7 +58,7 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
req := lb.CreateRequest{
Name: d.Get("name").(string),
RGID: uint64(d.Get("rg_id").(int)),
ExtNetID: uint64(d.Get("extnet_id").(int)),
ExtNetID: int64(d.Get("extnet_id").(int)),
VINSID: uint64(d.Get("vins_id").(int)),
}
if start, ok := d.GetOk("start"); ok {

View File

@@ -475,7 +475,6 @@ func flattenResourceRG(d *schema.ResourceData, rgData *rg.RecordRG) {
d.Set("gid", rgData.GID)
d.Set("rg_name", rgData.Name)
d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits))
d.Set("def_net_type", rgData.DefNetType)
d.Set("description", rgData.Description)
d.Set("register_computes", rgData.RegisterComputes)
d.Set("uniq_pools", rgData.UniqPools)

View File

@@ -77,7 +77,6 @@ type ResgroupUpdateParam struct {
Disk int `json:"maxVDiskCapacity"`
Cpu int `json:"maxCPUCapacity"`
NetTraffic int `json:"maxNetworkPeerTransfer"`
Reason string `json:"reason"`
}
type AccountAclRecord struct {

View File

@@ -117,18 +117,10 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.DefNet = defNetType.(string)
}
if ipcidr, ok := d.GetOk("ipcidr"); ok {
req.IPCIDR = ipcidr.(string)
}
if description, ok := d.GetOk("description"); ok {
req.Description = description.(string)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if extNetId, ok := d.GetOk("ext_net_id"); ok {
req.ExtNetID = uint64(extNetId.(int))
}
@@ -211,7 +203,6 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
d.Get("rg_name").(string), d.Get("account_id").(int))
//c := m.(*controller.ControllerCfg)
rgData, err := utilityResgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty
@@ -301,8 +292,7 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
restore, ok := d.GetOk("restore")
if ok && restore.(bool) {
restoreReq := rg.RestoreRequest{
RGID: rgData.ID,
Reason: "automatic restore of resource by terraform",
RGID: rgData.ID,
}
_, err := c.CloudBroker().RG().Restore(ctx, restoreReq)
@@ -316,8 +306,7 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
enable, ok := d.GetOk("enable")
if ok && enable.(bool) {
enableReq := rg.EnableRequest{
RGID: rgData.ID,
Reason: "automatic enable of resource by terraform",
RGID: rgData.ID,
}
_, err = c.CloudBroker().RG().Enable(ctx, enableReq)
@@ -349,20 +338,12 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
/* NOTE: we do not allow changing the following attributes of an existing RG via terraform:
- def_net_type
- ipcidr
- ext_net_id
- ext_ip
The following code fragment checks if any of these have been changed and generates an error.
*/
if ok := d.HasChange("def_net"); ok {
_, newDefNet := d.GetChange("def_net")
if newDefNet.(*schema.Set).Len() == 0 {
return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty")
}
}
for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
for _, attr := range []string{"def_net_type", "ext_ip"} {
attrNew, attrOld := d.GetChange(attr)
if attrNew.(string) != attrOld.(string) {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr))
@@ -444,6 +425,9 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if d.HasChange("uniq_pools") {
uniqPools := d.Get("uniq_pools").([]interface{})
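// an empty uniq_pools list requests clearing of the pool list on the platform side (ClearUniqPools)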
if len(uniqPools) == 0 {
req.ClearUniqPools = true
}
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
@@ -468,6 +452,13 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
}
if d.HasChange("def_net") {
oldDefNet, _ := d.GetChange("def_net")
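// drop the previously configured default network before applying the new def_net block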
if oldDefNet.(*schema.Set).Len() > 0 {
_, err := c.CloudBroker().RG().RemoveDefNet(ctx, rg.RemoveDefNetRequest{RGID: rgData.ID})
if err != nil {
return diag.FromErr(err)
}
}
if err := resourceRGSetDefNet(ctx, d, m); err != nil {
return diag.FromErr(err)
}
@@ -520,9 +511,6 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
c := m.(*controller.ControllerCfg)
_, err = c.CloudBroker().RG().Delete(ctx, req)
@@ -559,10 +547,6 @@ func resourceRGAccessGrant(ctx context.Context, d *schema.ResourceData, m interf
Right: right,
}
if reason, ok := access["reason"]; ok {
req.Reason = reason.(string)
}
if _, err := c.CloudBroker().RG().AccessGrant(ctx, req); err != nil {
errs = append(errs, err)
}
@@ -593,9 +577,6 @@ func resourceRGSetDefNet(ctx context.Context, d *schema.ResourceData, m interfac
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
if reason, ok := defNetItem["reason"]; ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().SetDefNet(ctx, req)
return err
@@ -651,10 +632,6 @@ func resourceRGChangeAccess(ctx context.Context, d *schema.ResourceData, m inter
User: user,
}
if reason, ok := deleteItem["reason"]; ok {
reqRevoke.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().AccessRevoke(ctx, reqRevoke)
if err != nil {
return err
@@ -673,10 +650,6 @@ func resourceRGChangeAccess(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}
if reason, ok := addedItem["reason"]; ok {
reqGrant.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().AccessGrant(ctx, reqGrant)
if err != nil {
return err
@@ -749,6 +722,17 @@ func ResourceResgroup() *schema.Resource {
StateContext: schema.ImportStatePassthroughContext,
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
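// when def_net changes, mark def_net_id as "known after apply"; on any detected change,
// refresh updated_by and updated_time as well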
if diff.HasChange("def_net") {
diff.SetNewComputed("def_net_id")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_by")
diff.SetNewComputed("updated_time")
}
return nil
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,

View File

@@ -11,10 +11,7 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
@@ -739,10 +736,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"cpu": {
Type: schema.TypeInt,
@@ -2268,7 +2261,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
// Default: "PRIVATE",
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
@@ -2287,11 +2279,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Description: "User-defined text description of this resource group.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"ext_net_id": {
Type: schema.TypeInt,
Optional: true,
@@ -2337,11 +2324,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -2364,11 +2346,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},

View File

@@ -50,9 +50,6 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
} else {
req.RGID = uint64(d.Get("rg_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
rgData, err := c.CloudBroker().RG().Get(ctx, req)
if err != nil {

View File

@@ -46,10 +46,6 @@ func utilityDataRgUsageCheckPresence(ctx context.Context, d *schema.ResourceData
RGID: uint64(d.Get("rg_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
usage, err := c.CloudBroker().RG().Usage(ctx, req)
if err != nil {
return nil, err

Some files were not shown because too many files have changed in this diff.