2024-11-12 13:41:38 +03:00
parent 040af43607
commit 36879efd58
517 changed files with 37877 additions and 1900 deletions

View File

@@ -49,11 +49,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
},
"recursive_delete": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},

View File

@@ -165,9 +165,8 @@ func utilityAccountUsersUpdate(ctx context.Context, d *schema.ResourceData, m in
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: acc.ID,
UserName: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
AccountID: acc.ID,
UserName: userConv["user_id"].(string),
})
if err != nil {
@@ -414,8 +413,7 @@ func isChangedUser(els []interface{}, el interface{}) bool {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string))) {
return true
}
}

View File

@@ -0,0 +1,92 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
"os"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAuditsToFileRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
filePath := "audits.tar.gz"
if userPath, ok := d.GetOk("file_path"); ok {
filePath = userPath.(string)
}
log.Debugf("dataSourceAuditsToFileRead: create file with name: %s", filePath)
file, err := os.Create(filePath)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
// register the deferred Close only after the error has been checked
defer file.Close()
data, err := utilityAuditsToFileCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
log.Debugf("dataSourceAuditsToFileRead: write data to file with name: %s", filePath)
_, err = file.Write(data)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
return nil
}
func DataSourceAuditsToFile() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAuditsToFileRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAuditToFileSchemaMake(),
}
}
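Editor's note, not part of this commit: a minimal sketch of how the exported archive could be inspected afterwards, assuming the payload written by the data source above is a gzip-compressed tar archive, as the default file name audits.tar.gz suggests. The function names here are illustrative only.
package main
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
)
// listAuditArchive prints the entries of the file produced by the audits-to-file
// data source. The gzip+tar layout is an assumption, not confirmed by this diff.
func listAuditArchive(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
gz, err := gzip.NewReader(f)
if err != nil {
return err
}
defer gz.Close()
tr := tar.NewReader(gz)
for {
hdr, err := tr.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
fmt.Println(hdr.Name, hdr.Size)
}
}
func main() {
if err := listAuditArchive("audits.tar.gz"); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}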

View File

@@ -41,7 +41,6 @@ import (
func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
log.Debugf("flattenAudit: decoded audit guid %s", d.Get("audit_guid").(string))
d.Set("apitask", au.Apitask)
d.Set("args", au.Arguments)
d.Set("call", au.Call)
d.Set("guid", au.GUID)

View File

@@ -10,10 +10,6 @@ func dataSourceAuditSchemaMake() map[string]*schema.Schema {
Description: "audit guid",
},
"apitask": {
Type: schema.TypeString,
Computed: true,
},
"args": {
Type: schema.TypeString,
Computed: true,
@@ -211,3 +207,13 @@ func dataSourceLinkedJobsSchemaMake() map[string]*schema.Schema {
},
}
}
func dataSourceAuditToFileSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"file_path": {
Type: schema.TypeString,
Optional: true,
Description: "file path",
},
}
}

View File

@@ -0,0 +1,54 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package audit
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityAuditsToFileCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]byte, error) {
c := m.(*controller.ControllerCfg)
log.Debugf("utilityAuditToFileCheckPresence: load audit file")
auditTar, err := c.CloudBroker().Audit().ExportAuditsToFile(ctx)
if err != nil {
return nil, err
}
return auditTar, nil
}

View File

@@ -264,7 +264,6 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
_, err = c.CloudBroker().Disks().Delete(ctx, req)
@@ -334,12 +333,6 @@ func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interfac
DiskID: uint64(d.Get("disk_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
return err
}

View File

@@ -177,7 +177,6 @@ func resourceDiskReplicationDelete(ctx context.Context, d *schema.ResourceData,
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d", diskId)

View File

@@ -207,27 +207,27 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -611,27 +611,27 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -1006,27 +1006,27 @@ func dataSourceDiskListDeletedSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -1859,12 +1859,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
@@ -2072,27 +2066,27 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2393,27 +2387,27 @@ func dataSourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2568,11 +2562,7 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Delete disk permanently",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for disk deletion",
},
"replica_disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -2771,27 +2761,27 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},
@@ -2889,4 +2879,4 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
}
return rets
}
}

View File

@@ -0,0 +1,72 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdk, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
flattenDPDKNet(d, dpdk)
return nil
}
func DataSourceDPDKNet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetSchemaMake(),
}
}

View File

@@ -0,0 +1,72 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDPDKNetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
dpdkList, err := utilityDPDKNetListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDPDKNetList(dpdkList))
d.Set("entry_count", dpdkList.EntryCount)
return nil
}
func DataSourceDPDKNetList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDPDKNetListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDPDKNetListSchemaMake(),
}
}

View File

@@ -0,0 +1,44 @@
package dpdknet
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
)
func flattenDPDKNet(d *schema.ResourceData, dpdk *dpdk.RecordDPDKNet) {
d.Set("dpdk_id", dpdk.ID)
d.Set("account_access", dpdk.AccountAccess)
d.Set("created_time", dpdk.CreatedTime)
d.Set("desc", dpdk.Description)
d.Set("gid", dpdk.GID)
d.Set("guid", dpdk.GUID)
d.Set("name", dpdk.Name)
d.Set("rg_access", dpdk.RGAccess)
d.Set("status", dpdk.Status)
d.Set("ovs_bridge", dpdk.OVSBridge)
d.Set("vlan_id", dpdk.VlanID)
d.Set("compute_ids", dpdk.ComputeIDs)
d.Set("updated_time", dpdk.UpdatedTime)
}
func flattenDPDKNetList(list *dpdk.ListDPDKNet) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(list.Data))
for _, dpdk := range list.Data {
temp := map[string]interface{}{
"dpdk_id": dpdk.ID,
"account_access": dpdk.AccountAccess,
"desc": dpdk.Description,
"gid": dpdk.GID,
"guid": dpdk.GUID,
"name": dpdk.Name,
"rg_access": dpdk.RGAccess,
"status": dpdk.Status,
"ovs_bridge": dpdk.OVSBridge,
"vlan_id": dpdk.VlanID,
"compute_ids": dpdk.ComputeIDs,
"updated_time": dpdk.UpdatedTime,
}
res = append(res, temp)
}
return res
}

View File

@@ -0,0 +1,24 @@
package dpdknet
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
gid := uint64(d.Get("gid").(int))
if err := ic.ExistGID(ctx, gid, c); err != nil {
errs = append(errs, err)
}
return dc.ErrorsToDiagnostics(errs)
}
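Editor's note: checkParamsExistence relies on the internal dc.ErrorsToDiagnostics helper, whose body is not shown in this diff. Below is a minimal sketch of what such a helper presumably looks like, inferred only from how it is called above; the actual implementation may differ.
package dc
import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
// ErrorsToDiagnostics converts a slice of errors into Terraform error
// diagnostics, returning nil when there is nothing to report.
func ErrorsToDiagnostics(errs []error) diag.Diagnostics {
if len(errs) == 0 {
return nil
}
diags := make(diag.Diagnostics, 0, len(errs))
for _, err := range errs {
diags = append(diags, diag.Diagnostic{
Severity: diag.Error,
Summary:  err.Error(),
})
}
return diags
}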

View File

@@ -0,0 +1,246 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"strconv"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
)
func resourceDPDKNetCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetCreate: called for DPDK network %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
req := dpdk.CreateRequest{
Name: d.Get("name").(string),
GID: uint64(d.Get("gid").(int)),
VlanID: uint64(d.Get("vlan_id").(int)),
OVSBridge: d.Get("ovs_bridge").(string),
}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
dpdkID, err := c.CloudBroker().DPDKNet().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(dpdkID, 10))
d.Set("dpdk_id", dpdkID)
warnings := dc.Warnings{}
if err = utilityDPDKNetEnabled(ctx, d, m); err != nil {
warnings.Add(err)
}
return append(warnings.Get(), resourceDPDKNetRead(ctx, d, m)...)
}
func resourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetRead: called for pdpk_id %d", d.Get("dpdk_id").(int))
w := dc.Warnings{}
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
log.Debugf("status: %s", dpdkItem.Status)
switch dpdkItem.Status {
case status.Destroyed, status.Purged:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be read because it has been destroyed")
case status.Deleted:
case status.Assigned:
case status.Modeled:
return diag.Errorf("The DPDK network is in status: %s, please, contact support for more information", dpdkItem.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
flattenDPDKNet(d, dpdkItem)
return w.Get()
}
func resourceDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetUpdate: called for dpdk_id %d", d.Get("dpdk_id").(int))
c := m.(*controller.ControllerCfg)
w := dc.Warnings{}
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch dpdkItem.Status {
case status.Destroyed, status.Purged:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
case status.Deleted:
d.Set("dpdk_id", 0)
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been deleted")
case status.Assigned:
case status.Modeled:
return diag.Errorf("The DPDK network is in status: %s, please, contact support for more information", dpdkItem.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
if d.HasChange("enabled") {
if err := utilityDPDKNetEnabled(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "desc", "vlan_id", "ovs_bridge", "account_access", "rg_access") {
if err := utilityDPDKNetUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(w.Get(), resourceDPDKNetRead(ctx, d, m)...)
}
func resourceDPDKNetDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDPDKNetDelete: called for dpdk_id %d", d.Get("dpdk_id").(int))
c := m.(*controller.ControllerCfg)
dpdkItem, err := utilityDPDKNetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
req := dpdk.DeleteRequest{
DPDKID: dpdkItem.ID,
}
if d.Get("enabled") == true {
req := dpdk.DisableRequest{
DPDKID: dpdkItem.ID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return diag.FromErr(err)
}
}
_, err = c.CloudBroker().DPDKNet().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func ResourceDPDKNet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceDPDKNetCreate,
ReadContext: resourceDPDKNetRead,
UpdateContext: resourceDPDKNetUpdate,
DeleteContext: resourceDPDKNetDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChange("enable") {
diff.SetNewComputed("status")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_time")
}
return nil
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: resourceDPDKNetSchemaMake(),
}
}
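Editor's note: the new resource and data sources still have to be registered in the provider maps, which are not shown in this diff. The sketch below is a hypothetical wiring example; the map keys ("decort_cb_dpdknet", "decort_cb_dpdknet_list"), the provider package name, and the import path of the dpdknet package are assumptions, not taken from this commit.
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/dpdknet"
)
// dpdkNetResources returns the resource map entries for DPDK network support.
func dpdkNetResources() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_cb_dpdknet": dpdknet.ResourceDPDKNet(),
}
}
// dpdkNetDataSources returns the data source map entries for DPDK network support.
func dpdkNetDataSources() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_cb_dpdknet":      dpdknet.DataSourceDPDKNet(),
"decort_cb_dpdknet_list": dpdknet.DataSourceDPDKNetList(),
}
}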

View File

@@ -0,0 +1,312 @@
package dpdknet
import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
func dataSourceDPDKNetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
}
return res
}
func dataSourceDPDKNetListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"gid": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by GID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Description: "Find by description",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"compute_ids": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Find by compute IDs",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dpdk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of DPDK network",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of network",
},
"rg_access": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
Description: "vlan ID",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func resourceDPDKNetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the grid (platform)",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of network",
},
"ovs_bridge": {
Type: schema.TypeString,
Required: true,
Description: "OVS bridge in which interfaces for computers created",
},
"vlan_id": {
Type: schema.TypeInt,
Required: true,
Description: "vlan ID",
},
"dpdk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the DPDK network",
},
"account_access": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of accounts with access",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Description of DPDK network",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "Enabled or disabled DPDK network",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "DPDK network ID on the storage side",
},
"rg_access": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of resource groups with access",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "DPDK network status",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "Compute IDs which uses this DPDK network",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Updated time",
},
}
return res
}

View File

@@ -0,0 +1,166 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDPDKNetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.RecordDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.GetRequest{}
if d.Get("dpdk_id") != nil {
if d.Get("dpdk_id").(int) == 0 {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
} else {
req.DPDKID = uint64(d.Get("dpdk_id").(int))
}
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DPDKID = id
}
log.Debugf("utilityDPDKCheckPresence: get DPDK network")
dpdk, err := c.CloudBroker().DPDKNet().Get(ctx, req)
if err != nil {
return nil, err
}
return dpdk, nil
}
func utilityDPDKNetEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
dpdkID, _ := strconv.ParseUint(d.Id(), 10, 64)
enabled := d.Get("enabled").(bool)
if enabled {
req := dpdk.EnableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Enable(ctx, req); err != nil {
return err
}
} else {
req := dpdk.DisableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return err
}
}
log.Debugf("resourceDPDKNetUpdate: enable=%v DPDK Network ID %s after completing its resource configuration", enabled, d.Id())
return nil
}
func utilityDPDKNetUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
dpdkID, _ := strconv.ParseUint(d.Id(), 10, 64)
req := dpdk.UpdateRequest{
DPDKID: dpdkID,
}
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("desc") {
req.Description = d.Get("desc").(string)
}
if d.HasChange("vlan_id") {
req.VlanID = uint64(d.Get("vlan_id").(int))
}
if d.HasChange("ovs_bridge") {
req.OVSBridge = d.Get("ovs_bridge").(string)
}
if d.HasChange("account_access") {
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
}
if d.HasChange("rg_access") {
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
}
if d.Get("enabled") == true {
req := dpdk.DisableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Disable(ctx, req); err != nil {
return err
}
}
if _, err := c.CloudBroker().DPDKNet().Update(ctx, req); err != nil {
return err
}
if d.Get("enabled") == true {
req := dpdk.EnableRequest{
DPDKID: dpdkID,
}
if _, err := c.CloudBroker().DPDKNet().Enable(ctx, req); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,108 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package dpdknet
import (
"context"
log "github.com/sirupsen/logrus"
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityDPDKNetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.ListDPDKNet, error) {
c := m.(*controller.ControllerCfg)
req := dpdk.ListRequest{}
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}
if GID, ok := d.GetOk("gid"); ok {
req.GID = uint64(GID.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if vlanID, ok := d.GetOk("vlan_id"); ok {
req.VlanID = uint64(vlanID.(int))
}
if computeIDs, ok := d.GetOk("compute_ids"); ok {
IDs := computeIDs.([]interface{})
for _, ID := range IDs {
req.ComputeIDs = append(req.ComputeIDs, uint64(ID.(int)))
}
}
if accountAccess, ok := d.GetOk("account_access"); ok {
IDs := accountAccess.([]interface{})
for _, ID := range IDs {
req.AccountAccess = append(req.AccountAccess, uint64(ID.(int)))
}
}
if rgAccess, ok := d.GetOk("rg_access"); ok {
IDs := rgAccess.([]interface{})
for _, ID := range IDs {
req.RGAccess = append(req.RGAccess, uint64(ID.(int)))
}
}
if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDPDKListCheckPresence: load DPDK network list")
dpdkList, err := c.CloudBroker().DPDKNet().List(ctx, req)
if err != nil {
return nil, err
}
return dpdkList, nil
}

View File

@@ -0,0 +1,69 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridGetSettingsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridSettings, err := utilityGridGetSettingsCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
flattenGridSettings(d, gridSettings)
return nil
}
func DataSourceGridGetSettings() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridGetSettingsRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGridGetSettingsSchemaMake(),
}
}

View File

@@ -1,12 +1,16 @@
package grid
import (
"encoding/json"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) {
d.Set("ckey", grid.CKey)
d.Set("meta", flattens.FlattenMeta(grid.Meta))
d.Set("auth_broker", flattens.FlattenMeta(grid.AuthBroker))
d.Set("name", grid.Name)
d.Set("flag", grid.Flag)
@@ -88,3 +92,83 @@ func flattenGridSeps(seps map[string]map[string]grid.DiskUsage) []map[string]int
}
return res
}
func flattenGridSettings(d *schema.ResourceData, gridSettings *grid.RecordSettingsGrid) {
limits, _ := json.Marshal(gridSettings.Limits)
d.Set("allowed_ports", gridSettings.Allowedports)
d.Set("cleanup_retention_period", gridSettings.CleanupRetentionPeriod)
d.Set("docker_registry", flattenDockerRegistry(gridSettings.DockerRegistry))
d.Set("enable_uptime_monitor", gridSettings.EnableUptimeMonitor)
d.Set("extnet_max_pre_reservations_num", gridSettings.ExtnetMaxPreReservationsNum)
d.Set("healthcheck_notifications", flattenHealthcheckNotifications(gridSettings.HealthcheckNotifications))
d.Set("k8s_cleanup_enabled", gridSettings.K8sCleanupEnabled)
d.Set("limits", string(limits))
d.Set("location_url", gridSettings.LocationURL)
d.Set("net_qos", flattenNetQOS(gridSettings.NetQOS))
d.Set("networks", gridSettings.Networks)
d.Set("prometheus", flattenPrometheus(gridSettings.Prometheus))
d.Set("vins_max_pre_reservations_num", gridSettings.VinsMaxPreReservationsNum)
d.Set("vnfdev_mgmt_net_range", gridSettings.VnfdevMgmtNetRange)
}
func flattenDockerRegistry(dr grid.DockerRegistry) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"password": dr.Password,
"server": dr.Server,
"username": dr.Username,
}
res = append(res, temp)
return res
}
func flattenHealthcheckNotifications(hn grid.HealthcheckNotifications) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"emails": flattenEmails(hn.Emails),
}
res = append(res, temp)
return res
}
func flattenEmails(emails []grid.Emails) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(emails))
for _, email := range emails {
temp := map[string]interface{}{
"address": email.Address,
"enabled": email.Enabled,
}
res = append(res, temp)
}
return res
}
func flattenNetQOS(netQOS grid.NetQOS) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"extnet": flattenSettingsNetQOS(netQOS.ExtNet),
"vins": flattenSettingsNetQOS(netQOS.VINS),
}
res = append(res, temp)
return res
}
func flattenSettingsNetQOS(qos grid.SettingsNetQOS) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"e_rate": qos.ERate,
"in_burst": qos.InBurst,
"in_rate": qos.InRate,
}
res = append(res, temp)
return res
}
func flattenPrometheus(pr grid.Prometheus) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"scrape_interval": pr.ScrapeInterval,
}
res = append(res, temp)
return res
}

View File

@@ -8,6 +8,18 @@ func dataSourceGetGridSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"auth_broker": {
Type: schema.TypeList,
Computed: true,
@@ -593,3 +605,160 @@ func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema {
},
}
}
func dataSourceGridGetSettingsSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Description: "grid (platform) ID",
Required: true,
},
"allowed_ports": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"cleanup_retention_period": {
Type: schema.TypeInt,
Computed: true,
},
"docker_registry": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"password": {
Type: schema.TypeString,
Computed: true,
},
"server": {
Type: schema.TypeString,
Computed: true,
},
"username": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"enable_uptime_monitor": {
Type: schema.TypeBool,
Computed: true,
},
"extnet_max_pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"healthcheck_notifications": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"emails": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Computed: true,
},
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
},
},
},
"k8s_cleanup_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"limits": {
Type: schema.TypeString,
Computed: true,
},
"location_url": {
Type: schema.TypeString,
Computed: true,
},
"net_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"extnet": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"networks": {
Type: schema.TypeString,
Computed: true,
},
"prometheus": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"scrape_interval": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"vins_max_pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"vnfdev_mgmt_net_range": {
Type: schema.TypeString,
Computed: true,
},
}
}

View File

@@ -0,0 +1,63 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityGridGetSettingsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordSettingsGrid, error) {
c := m.(*controller.ControllerCfg)
req := grid.GetSettingsRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.GID = id
} else {
req.GID = uint64(d.Get("grid_id").(int))
}
log.Debugf("utilityGridutilityGridGetSettingsCheckPresenceCheckPresence: load grid settings")
gridSettingsRec, err := c.CloudBroker().Grid().GetSettings(ctx, req)
if err != nil {
return nil, err
}
return gridSettingsRec, nil
}

View File

@@ -8,6 +8,7 @@ import (
cb_account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
cb_compute "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
cb_disks "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
cb_dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/dpdknet"
cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
@@ -284,6 +285,39 @@ func ExistVFPools(ctx context.Context, vfpoolIds []uint64, c *controller.Control
return errs
}
func ExistDPDKNet(ctx context.Context, dpdkIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
if len(dpdkIds) == 0 {
return errs
}
req := cb_dpdk.ListRequest{}
dpdkList, err := c.CloudBroker().DPDKNet().List(ctx, req)
if err != nil {
errs = append(errs, err)
return errs
}
for _, dpdkId := range dpdkIds {
found := false
for _, dpdk := range dpdkList.Data {
if dpdkId == dpdk.ID {
found = true
break
}
}
if !found {
errs = append(errs, fmt.Errorf("DPDKNet with ID %v not found", dpdkId))
}
}
return errs
}
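Editor's note: a hedged usage sketch of the new ic.ExistDPDKNet check, mirroring the checkParamsExistence pattern introduced in the dpdknet package earlier in this commit. The calling package name ("compute") and the "dpdk_net_ids" field are assumptions for illustration only; the import paths match those already used in this diff.
package compute
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
)
// checkDPDKNetParams validates that every referenced DPDK network exists
// before the resource call is made.
func checkDPDKNetParams(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
ids := make([]uint64, 0)
for _, v := range d.Get("dpdk_net_ids").([]interface{}) {
ids = append(ids, uint64(v.(int)))
}
return dc.ErrorsToDiagnostics(ic.ExistDPDKNet(ctx, ids, c))
}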
func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
if extNetId == 0 {
return nil

View File

@@ -130,10 +130,6 @@ func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m int
ImageID: imageData.ID,
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -141,13 +141,6 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -179,13 +179,6 @@ func resourceImageFromBlankComputeDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -186,13 +186,6 @@ func resourceImageFromPlatformDiskDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -99,13 +99,6 @@ func resourceVirtualImageDelete(ctx context.Context, d *schema.ResourceData, m i
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

View File

@@ -407,6 +407,11 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "find by enabled True or False",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -458,7 +463,7 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -716,7 +721,7 @@ func dataSourceImageSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -922,7 +927,7 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Required: true,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
@@ -968,13 +973,7 @@ func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
Description: "binary architecture of this image, one of X86_64",
},
"bootable": {
Type: schema.TypeBool,
@@ -1269,12 +1268,7 @@ func resourceImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Description: "Whether to completely delete the image",
Description: "binary architecture of this image, one of X86_64",
},
"bootable": {
Type: schema.TypeBool,
@@ -1313,10 +1307,7 @@ func resourceImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,
@@ -1534,15 +1525,7 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Description: "Whether to completely delete the image",
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
@@ -1595,7 +1578,7 @@ func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
Description: "binary architecture of this image, one of X86_64",
},
"boot_type": {
Type: schema.TypeString,
@@ -1801,12 +1784,6 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
@@ -1838,10 +1815,7 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,
@@ -2039,7 +2013,7 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "Image type linux, windows or other",
},
@@ -2069,11 +2043,10 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Required: true,
Elem: &schema.Schema{
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
@@ -2096,12 +2069,6 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
@@ -2133,10 +2100,7 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"accounts": {
Type: schema.TypeList,
Optional: true,

View File

@@ -72,13 +72,13 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if public, ok := d.GetOk("public"); ok {
if public, ok := d.GetOkExists("public"); ok {
req.Public = public.(bool)
}
if hotResize, ok := d.GetOk("hot_resize"); ok {
if hotResize, ok := d.GetOkExists("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}
if bootable, ok := d.GetOk("bootable"); ok {
if bootable, ok := d.GetOkExists("bootable"); ok {
req.Bootable = bootable.(bool)
}
if sortBy, ok := d.GetOk("sort_by"); ok {
@@ -90,7 +90,9 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if enabled, ok := d.GetOkExists("enabled"); ok {
req.Enabled = enabled.(bool)
}
log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudBroker().Image().List(ctx, req)
if err != nil {
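
The switch from GetOk to GetOkExists above matters for boolean filters: GetOk treats the zero value as "not set", so a user-supplied public = false, hot_resize = false or enabled = false would silently drop out of the request. A minimal sketch of the pattern, assuming the terraform-plugin-sdk/v2 module is on the module path; setBoolFilter is an illustrative helper, not provider code, and GetOkExists is deprecated but remains the practical way in SDK v2 to tell "false" apart from "not configured".

package filters

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// setBoolFilter copies a boolean attribute into a request field only when the
// user actually set it in the configuration, even if the value is false.
// With d.GetOk an explicit false would be indistinguishable from "unset".
func setBoolFilter(d *schema.ResourceData, key string, dst *bool) {
    //nolint:staticcheck // GetOkExists is deprecated, but SDK v2 offers no replacement for this case
    if v, ok := d.GetOkExists(key); ok {
        *dst = v.(bool)
    }
}
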

View File

@@ -48,14 +48,6 @@ func SyncCreateRequest(ctx context.Context, d *schema.ResourceData, m interface{
ImageType: d.Get("image_type").(string),
}
c := m.(*controller.ControllerCfg)
if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil {
return req, err
}
req.GID = uint64(d.Get("gid").(int))
if _, ok := d.GetOk("drivers"); ok {
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {

View File

@@ -111,14 +111,16 @@ func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{
d.Set("kubeconfig", kubeconfig)
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudBroker().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
if cluster.LBID != 0 {
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudBroker().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
}
flattenK8sData(d, cluster, masterComputeList, workersComputeList)
return nil
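
The new guard above resolves load balancer details only when the cluster actually references one; records created without an LB carry LBID 0, and looking that up would turn the whole data source read into an error. A compact stand-in with illustrative types (getLB is not the real LB().Get call):

package main

import (
    "errors"
    "fmt"
)

type lbRecord struct {
    ExtNetID   uint64
    FrontendIP string
}

// getLB stands in for the platform lookup; a zero ID is not a valid LB.
func getLB(id uint64) (*lbRecord, error) {
    if id == 0 {
        return nil, errors.New("lb not found")
    }
    return &lbRecord{ExtNetID: 13, FrontendIP: "185.1.1.10"}, nil
}

func main() {
    for _, lbID := range []uint64{0, 42} {
        if lbID == 0 {
            fmt.Println("cluster has no LB, skipping extnet_id/lb_ip")
            continue
        }
        if rec, err := getLB(lbID); err == nil {
            fmt.Println("extnet_id:", rec.ExtNetID, "lb_ip:", rec.FrontendIP)
        }
    }
}
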

View File

@@ -64,6 +64,9 @@ func flattenResourceK8sCP(d *schema.ResourceData, k8s k8s.RecordK8S, masters []c
d.Set("updated_by", k8s.UpdatedBy)
d.Set("updated_time", k8s.UpdatedTime)
d.Set("network_plugin", k8s.NetworkPlugin)
d.Set("highly_available_lb", k8s.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(k8s.AddressVIP))
d.Set("extnet_only", k8s.ExtnetOnly)
flattenCPParams(d, k8s.K8SGroups.Masters, masters)
}
@@ -100,6 +103,20 @@ func flattenK8sData(d *schema.ResourceData, cluster *k8s.RecordK8S, masters []co
d.Set("tech_status", cluster.TechStatus)
d.Set("updated_by", cluster.UpdatedBy)
d.Set("updated_time", cluster.UpdatedTime)
d.Set("highly_available_lb", cluster.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(cluster.AddressVIP))
d.Set("extnet_only", cluster.ExtnetOnly)
d.Set("with_lb", cluster.WithLB)
}
func flattenAddressVIP(addressVIP k8s.K8SAddressVIP) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"backend_ip": addressVIP.BackendIP,
"frontend_ip": addressVIP.FrontendIP,
}
res = append(res, temp)
return res
}
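
flattenAddressVIP follows the usual SDK v2 convention for nested objects: there is no single-object attribute type, so the struct is exposed as a one-element TypeList of maps and read back via index 0. A self-contained sketch of that round trip; addressVIP here is a local stand-in, not the SDK type:

package main

import "fmt"

type addressVIP struct {
    BackendIP  string
    FrontendIP string
}

// flatten converts the nested struct into the one-element list-of-maps shape
// that a Computed TypeList block expects.
func flatten(a addressVIP) []map[string]interface{} {
    return []map[string]interface{}{{
        "backend_ip":  a.BackendIP,
        "frontend_ip": a.FrontendIP,
    }}
}

func main() {
    flat := flatten(addressVIP{BackendIP: "10.0.0.5", FrontendIP: "185.1.1.10"})
    fmt.Println(flat[0]["frontend_ip"]) // the single block is always element 0
}
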
func flattenAcl(acl k8s.RecordACLGroup) []map[string]interface{} {

View File

@@ -158,6 +158,10 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
createReq.OidcCertificate = oidcCertificate.(string)
}
if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}
if extNetOnly, ok := d.GetOk("extnet_only"); ok {
createReq.ExtNetOnly = extNetOnly.(bool)
}
@@ -475,10 +479,10 @@ func handleUpdateNum(ctx context.Context, d *schema.ResourceData, c *controller.
oldVal, newVal := d.GetChange("num")
if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
ids := make([]uint64, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := k8sData.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
ids = append(ids, id)
}
req := k8s.DeleteMasterFromGroupRequest{
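
With DeleteMasterFromGroupRequest taking numeric IDs in this hunk, the strconv.FormatUint round-trip disappears: the loop simply collects the IDs of the masters beyond the new target count, walking the detailed-info slice from the end. A self-contained sketch of that selection (masterIDsToDrop is illustrative, not provider code):

package main

import "fmt"

// masterIDsToDrop picks the IDs of the masters that exceed the new node count,
// starting from the tail of the group, mirroring the loop above.
func masterIDsToDrop(ids []uint64, oldNum, newNum int) []uint64 {
    drop := make([]uint64, 0, oldNum-newNum)
    for i := oldNum - 1; i >= newNum; i-- {
        drop = append(drop, ids[i])
    }
    return drop
}

func main() {
    fmt.Println(masterIDsToDrop([]uint64{101, 102, 103, 104, 105}, 5, 3)) // [105 104]
}
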

View File

@@ -65,6 +65,7 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
}
if d.Get("disk") == nil {
@@ -187,6 +188,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
Chipset: d.Get("chipset").(string),
}
_, err := c.CloudBroker().K8S().WorkerAdd(ctx, req)

View File

@@ -1439,6 +1439,11 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "insert ssl certificate in x509 pem format",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"lb_sysctl_params": {
Type: schema.TypeList,
Optional: true,
@@ -1771,6 +1776,11 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,

View File

@@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {

View File

@@ -38,6 +38,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute, p
d.Set("boot_disk_id", bootDisk.ID)
// we intentionally use the SizeMax field, do not change it until the BootDiskSize field is fixed on the platform
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("chipset", computeRec.Chipset)
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
@@ -156,31 +157,49 @@ func flattenInterfaces(ifaces compute.ListInterfaces) []map[string]interface{} {
for _, iface := range ifaces {
res = append(res, map[string]interface{}{
"conn_id": iface.ConnID,
"conn_type": iface.ConnType,
"def_gw": iface.DefGW,
"enabled": iface.Enabled,
"flip_group_id": iface.FLIPGroupID,
"guid": iface.GUID,
"ip_address": iface.IPAddress,
"listen_ssh": iface.ListenSSH,
"mac": iface.MAC,
"name": iface.Name,
"net_id": iface.NetID,
"netmask": iface.NetMask,
"net_type": iface.NetType,
"node_id": iface.NodeID,
"pci_slot": iface.PCISlot,
"qos": flattenQOS(iface.QOS),
"target": iface.Target,
"type": iface.Type,
"vnfs": iface.VNFs,
"bus_number": iface.BusNumber,
"conn_id": iface.ConnID,
"conn_type": iface.ConnType,
"def_gw": iface.DefGW,
"enabled": iface.Enabled,
"flip_group_id": iface.FLIPGroupID,
"guid": iface.GUID,
"ip_address": iface.IPAddress,
"listen_ssh": iface.ListenSSH,
"mac": iface.MAC,
"mtu": iface.MTU,
"name": iface.Name,
"net_id": iface.NetID,
"netmask": iface.NetMask,
"net_type": iface.NetType,
"node_id": iface.NodeID,
"pci_slot": iface.PCISlot,
"qos": flattenQOS(iface.QOS),
"target": iface.Target,
"type": iface.Type,
"vnfs": iface.VNFs,
"libvirt_settings": flattenLibvirtSettings(iface.LibvirtSettings),
})
}
return res
}
func flattenLibvirtSettings(libvirtSettings compute.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}
func flattenQOS(qos compute.QOS) []map[string]interface{} {
return []map[string]interface{}{
{
@@ -275,6 +294,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"chipset": computeItem.Chipset,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
@@ -406,8 +426,9 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
"bus_number": disk.BusNumber,
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}
@@ -593,6 +614,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
d.Set("arch", compFacts.Arch)
d.Set("boot_order", compFacts.BootOrder)
d.Set("chipset", compFacts.Chipset)
d.Set("cd_image_id", compFacts.CdImageId)
d.Set("clone_reference", compFacts.CloneReference)
d.Set("clones", compFacts.Clones)
@@ -708,6 +730,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
"meta": flattens.FlattenMeta(disk.Meta),
"account_id": disk.AccountID,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,

View File

@@ -2,7 +2,6 @@ package kvmvm
import (
"context"
"errors"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -37,7 +36,7 @@ func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *contro
func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) []error {
var errs []error
var vinsIds, extNetIds, vfpoolIds []uint64
var vinsIds, extNetIds, vfpoolIds, dpdkIds []uint64
networksIface, ok := d.GetOk("network")
if !ok {
@@ -54,10 +53,9 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
case "EXTNET":
extNetIds = append(extNetIds, uint64(network["net_id"].(int)))
case "VFNIC":
if d.Get("driver").(string) == "KVM_PPC" {
errs = append(errs, errors.New("'VFNIC' net_type is not allowed for driver 'KVM_PPC'"))
}
vfpoolIds = append(vfpoolIds, uint64(network["net_id"].(int)))
case "DPDK":
dpdkIds = append(dpdkIds, uint64(network["net_id"].(int)))
default:
continue
}
@@ -75,5 +73,9 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
errs = append(errs, vfpoolErrs...)
}
if dpdkErrs := ic.ExistDPDKNet(ctx, dpdkIds, c); dpdkErrs != nil {
errs = append(errs, dpdkErrs...)
}
return errs
}

View File

@@ -42,7 +42,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
@@ -55,59 +54,48 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg)
createReqX86 := kvmx86.CreateRequest{}
createReqPPC := kvmppc.CreateRequest{}
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
if desc, ok := d.GetOk("description"); ok {
createReqPPC.Description = desc.(string)
createReqX86.Description = desc.(string)
}
if sepID, ok := d.GetOk("sep_id"); ok {
createReqPPC.SEPID = uint64(sepID.(int))
createReqX86.SEPID = uint64(sepID.(int))
}
if pool, ok := d.GetOk("pool"); ok {
createReqPPC.Pool = pool.(string)
createReqX86.Pool = pool.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
createReqPPC.StackID = uint64(stackID.(int))
createReqX86.StackID = uint64(stackID.(int))
}
if start, ok := d.GetOk("started"); ok {
createReqPPC.Start = start.(bool)
createReqX86.Start = start.(bool)
}
if ipaType, ok := d.GetOk("ipa_type"); ok {
createReqPPC.IPAType = ipaType.(string)
createReqX86.IPAType = ipaType.(string)
}
if bootSize, ok := d.GetOk("boot_disk_size"); ok {
createReqPPC.BootDisk = uint64(bootSize.(int))
createReqX86.BootDisk = uint64(bootSize.(int))
}
if IS, ok := d.GetOk("is"); ok {
createReqPPC.IS = IS.(string)
createReqX86.IS = IS.(string)
}
createReqX86.Interfaces = make([]kvmx86.Interface, 0)
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
if networks, ok := d.GetOk("network"); ok {
if networks.(*schema.Set).Len() > 0 {
ns := networks.(*schema.Set).List()
log.Debugf("BEFORE SORT %v", ns)
sort.Slice(ns, func(i, j int) bool {
weightI := ns[i].(map[string]interface{})["weight"].(int)
weightJ := ns[j].(map[string]interface{})["weight"].(int)
@@ -119,9 +107,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
return weightI < weightJ
})
log.Debugf("AFTER SORT %v", ns)
interfacesX86 := make([]kvmx86.Interface, 0)
interfacesPPC := make([]kvmppc.Interface, 0)
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
@@ -139,28 +125,11 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
createReqX86.Interfaces = interfacesX86
for _, elem := range ns {
netInterfaceVal := elem.(map[string]interface{})
reqInterface := kvmppc.Interface{
NetType: netInterfaceVal["net_type"].(string),
NetID: uint64(netInterfaceVal["net_id"].(int)),
}
ipaddr, ipSet := netInterfaceVal["ip_address"]
if ipSet {
reqInterface.IPAddr = ipaddr.(string)
}
interfacesPPC = append(interfacesPPC, reqInterface)
}
createReqPPC.Interfaces = interfacesPPC
}
}
if disks, ok := d.GetOk("disks"); ok {
disksX86 := make([]kvmx86.DataDisk, 0)
disksPPC := make([]kvmppc.DataDisk, 0)
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
@@ -185,95 +154,57 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
createReqX86.DataDisks = disksX86
for _, elem := range disks.([]interface{}) {
diskVal := elem.(map[string]interface{})
reqDataDisk := kvmppc.DataDisk{
DiskName: diskVal["disk_name"].(string),
Size: uint64(diskVal["size"].(int)),
}
if sepId, ok := diskVal["sep_id"]; ok {
reqDataDisk.SepID = uint64(sepId.(int))
}
if pool, ok := diskVal["pool"]; ok {
reqDataDisk.Pool = pool.(string)
}
if desc, ok := diskVal["desc"]; ok {
reqDataDisk.Description = desc.(string)
}
if imageID, ok := diskVal["image_id"]; ok {
reqDataDisk.ImageID = uint64(imageID.(int))
}
disksPPC = append(disksPPC, reqDataDisk)
}
createReqPPC.DataDisks = disksPPC
}
if cloudInit, ok := d.GetOk("cloud_init"); ok {
userdata := cloudInit.(string)
if userdata != "" && userdata != "applied" {
createReqPPC.Userdata = strings.TrimSpace(userdata)
createReqX86.Userdata = strings.TrimSpace(userdata)
}
}
var computeId uint64
driver := d.Get("driver").(string)
if driver == "KVM_PPC" {
createReqPPC.RGID = uint64(d.Get("rg_id").(int))
createReqPPC.Name = d.Get("name").(string)
createReqPPC.CPU = uint64(d.Get("cpu").(int))
createReqPPC.RAM = uint64(d.Get("ram").(int))
createReqPPC.ImageID = uint64(d.Get("image_id").(int))
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
apiResp, err := c.CloudBroker().KVMPPC().Create(ctx, createReqPPC)
if err != nil {
return diag.FromErr(err)
}
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
} else {
createReqX86.RGID = uint64(d.Get("rg_id").(int))
createReqX86.Name = d.Get("name").(string)
createReqX86.CPU = uint64(d.Get("cpu").(int))
createReqX86.RAM = uint64(d.Get("ram").(int))
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
createReqX86.Driver = driver
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomField = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
if image, ok := d.GetOk("image_id"); ok {
createReqX86.ImageID = uint64(image.(int))
}
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
}
createReqX86.Driver = driver
if custom_fields, ok := d.GetOk("custom_fields"); ok {
val := custom_fields.(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
createReqX86.CustomField = val
}
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
createReqX86.NumaAffinity = numaAffinity.(string)
}
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
createReqX86.HPBacked = d.Get("hp_backed").(bool)
createReqX86.Chipset = d.Get("chipset").(string)
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(apiResp, 10))
computeId = apiResp
warnings := dc.Warnings{}
@@ -285,9 +216,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
Permanently: true,
DetachDisks: true,
}
if reason, ok := d.Get("reason").(string); ok {
req.Reason = reason
}
if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil {
log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err)
}
@@ -439,9 +368,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if int64(pfwItem["public_port_end"].(int)) != 0 {
req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
if err != nil {
warnings.Add(err)
@@ -499,9 +426,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
ComputeID: computeId,
CDROMID: uint64(snapshotItem["cdrom_id"].(int)),
}
if snapshotItem["reason"].(string) != "" {
req.Reason = snapshotItem["reason"].(string)
}
_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
if err != nil {
warnings.Add(err)
@@ -560,6 +485,31 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
if ars, ok := d.GetOk("libvirt_settings"); ok {
log.Debugf("resourceComputeCreate: Configure libvirt virtio interface parameters on ComputeID: %d", computeId)
settings := ars.(*schema.Set).List()
if len(settings) > 0 {
for _, v := range settings {
settingsConv := v.(map[string]interface{})
req := compute.SetNetConfigRequest{
ComputeID: computeId,
MAC: settingsConv["mac"].(string),
TXMode: settingsConv["txmode"].(string),
IOEventFD: settingsConv["ioeventfd"].(string),
EventIDx: settingsConv["event_idx"].(string),
Queues: uint64(settingsConv["queues"].(int)),
RXQueueSize: uint64(settingsConv["rx_queue_size"].(int)),
TXQueueSize: uint64(settingsConv["tx_queue_size"].(int)),
}
_, err := c.CloudBroker().Compute().SetNetConfig(ctx, req)
if err != nil {
warnings.Add(err)
}
}
}
}
}
log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -823,6 +773,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if d.HasChange("libvirt_settings") {
if err := utilityComputeUpdateLibvirtSettings(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
return append(resourceComputeRead(ctx, d, m), warnings.Get()...)
}
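
Network interfaces in resourceComputeCreate are attached in ascending weight order, with weight 0 treated as "no preference" and pushed to the end. The middle of the comparator is truncated in the hunk above, so the sketch below is one consistent way to express that rule, not necessarily the exact provider code:

package main

import (
    "fmt"
    "sort"
)

func main() {
    weights := []int{0, 20, 5, 0, 10}
    sort.Slice(weights, func(i, j int) bool {
        wi, wj := weights[i], weights[j]
        if wi == 0 { // zero weight is never "less", so it sinks to the end
            return false
        }
        if wj == 0 {
            return true
        }
        return wi < wj
    })
    fmt.Println(weights) // [5 10 20 0 0]
}
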

View File

@@ -15,11 +15,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Get compute by id",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"account_id": {
Type: schema.TypeInt,
@@ -146,6 +141,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"cd_image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -225,6 +224,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -554,6 +557,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -590,6 +597,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -614,6 +625,7 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"qos": {
Type: schema.TypeList,
Computed: true,
@@ -638,6 +650,42 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,
@@ -932,6 +980,16 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Find by IP address",
},
"stack_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by stack ID",
},
"image_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by image ID",
},
"extnet_name": {
Type: schema.TypeString,
Optional: true,
@@ -1098,6 +1156,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"cd_image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1158,6 +1220,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1198,6 +1264,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -1234,6 +1304,10 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -1282,6 +1356,42 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,
@@ -2153,10 +2263,6 @@ func dataSourceComputeGetAuditsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"items": {
Type: schema.TypeList,
@@ -2196,10 +2302,6 @@ func dataSourceComputePfwListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"items": {
Type: schema.TypeList,
@@ -2792,7 +2894,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
@@ -2815,6 +2917,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "ID of the OS image to base this compute instance on.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Type of the emulated system.",
},
"without_boot_disk": {
Type: schema.TypeBool,
Optional: true,
@@ -2900,7 +3008,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC", "DPDK"}, false), // observe case while validating
Description: "Type of the network for this connection, either EXTNET or VINS.",
},
@@ -2934,11 +3042,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"affinity_label": {
Type: schema.TypeString,
Optional: true,
@@ -2981,6 +3085,11 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"delete_async_mode": {
Type: schema.TypeBool,
Computed: true,
Description: "async mode",
},
"anti_affinity_rules": {
Type: schema.TypeList,
Optional: true,
@@ -3092,11 +3201,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3152,11 +3256,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3211,11 +3310,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
},
},
},
@@ -3317,6 +3411,43 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
Description: "ID of the connected pci devices",
},
"libvirt_settings": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"mac": {
Type: schema.TypeString,
Required: true,
},
"txmode": {
Type: schema.TypeString,
Optional: true,
},
"ioeventfd": {
Type: schema.TypeString,
Optional: true,
},
"event_idx": {
Type: schema.TypeString,
Optional: true,
},
"queues": {
Type: schema.TypeInt,
Optional: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Optional: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Optional: true,
},
},
},
Description: "Configure libvirt virtio interface parameters. You can only delete values locally. Data on the platform cannot be deleted.",
},
// Computed properties
"account_id": {
Type: schema.TypeInt,
@@ -3436,6 +3567,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
@@ -3472,6 +3607,10 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -3520,6 +3659,42 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"target": {
Type: schema.TypeString,
Computed: true,

View File

@@ -532,10 +532,6 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ComputeID = uint64(d.Get("compute_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
res, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return nil, err
@@ -586,13 +582,16 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
oldSet, newSet := d.GetChange("network")
oldList := oldSet.(*schema.Set).List()
newList := newSet.(*schema.Set).List()
detachMap, changeIpMap, attachMap := differenceNetwork(oldList, newList)
apiErrCount := 0
var lastSavedError error
detachSet := oldSet.(*schema.Set).Difference(newSet.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detachSet.Len(), d.Id())
for _, runner := range detachSet.List() {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(detachMap), d.Id())
for _, netData := range detachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetDetachRequest{
ComputeID: computeId,
@@ -609,21 +608,40 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(changeIpMap), d.Id())
for _, netData := range changeIpMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.ChangeIPRequest{
ComputeID: computeId,
NetType: netData["net_type"].(string),
NetID: uint64(netData["net_id"].(int)),
IPAddr: netData["ip_address"].(string),
}
_, err := c.CloudBroker().Compute().ChangeIP(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to change net ID %d of type %s from Compute ID %s: %s",
netData["net_id"].(int), netData["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || oldSet.(*schema.Set).Len() < 1 {
if oldSet.(*schema.Set).Len() == len(detachMap) || oldSet.(*schema.Set).Len() == 0 {
if err := utilityComputeStop(ctx, d, m); err != nil {
apiErrCount++
lastSavedError = err
}
needStart = true
if start := d.Get("started"); start.(bool) {
needStart = true
}
}
attachSet := newSet.(*schema.Set).Difference(oldSet.(*schema.Set))
attachList := attachSet.List()
sort.Slice(attachList, func(i, j int) bool {
weightI := attachList[i].(map[string]interface{})["weight"].(int)
weightJ := attachList[j].(map[string]interface{})["weight"].(int)
sort.Slice(attachMap, func(i, j int) bool {
weightI := attachMap[i]["weight"].(int)
weightJ := attachMap[j]["weight"].(int)
if weightI == 0 {
return false
}
@@ -632,9 +650,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
return weightI < weightJ
})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attachSet.Len(), d.Id())
for _, runner := range attachList {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(attachMap), d.Id())
for _, netData := range attachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetAttachRequest{
ComputeID: computeId,
@@ -672,6 +689,53 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
return nil
}
func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap, attachMap []map[string]interface{}) {
attachMap = make([]map[string]interface{}, 0)
changeIpMap = make([]map[string]interface{}, 0)
detachMap = make([]map[string]interface{}, 0)
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
found := false
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if (newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"] {
changeIpMap = append(changeIpMap, newMap)
found = true
break
} else if newMap["ip_address"] == oldMap["ip_address"] {
found = true
break
}
}
}
if found {
continue
}
detachMap = append(detachMap, oldMap)
}
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
found := false
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if newMap["ip_address"] == oldMap["ip_address"] || ((newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"]) {
found = true
break
}
}
}
if found {
continue
}
attachMap = append(attachMap, newMap)
}
return
}
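
differenceNetwork splits the old and new network sets three ways: blocks that disappeared are detached, blocks with the same net_type/net_id/weight but a different address on EXTNET or VINS go through ChangeIP, and everything genuinely new is attached. A self-contained illustration of that classification on simplified data; netBlock is a local stand-in for the schema map, and the second matching condition is written in its logically equivalent short form:

package main

import "fmt"

type netBlock struct {
    netType string
    netID   int
    weight  int
    ip      string
}

func classify(oldList, newList []netBlock) (detach, changeIP, attach []netBlock) {
    for _, o := range oldList {
        found := false
        for _, n := range newList {
            if n.netType == o.netType && n.netID == o.netID && n.weight == o.weight {
                if (n.netType == "EXTNET" || n.netType == "VINS") && n.ip != o.ip {
                    changeIP = append(changeIP, n) // same block, new IP
                    found = true
                    break
                } else if n.ip == o.ip {
                    found = true // unchanged block
                    break
                }
            }
        }
        if !found {
            detach = append(detach, o)
        }
    }
    for _, n := range newList {
        found := false
        for _, o := range oldList {
            if n.netType == o.netType && n.netID == o.netID && n.weight == o.weight &&
                (n.ip == o.ip || n.netType == "EXTNET" || n.netType == "VINS") {
                found = true
                break
            }
        }
        if !found {
            attach = append(attach, n)
        }
    }
    return
}

func main() {
    oldList := []netBlock{{"VINS", 10, 5, "10.0.0.2"}, {"EXTNET", 3, 10, "185.1.1.4"}}
    newList := []netBlock{
        {"VINS", 10, 5, "10.0.0.9"}, // same block, new IP -> ChangeIP
        {"VFNIC", 7, 15, ""},        // brand new block    -> NetAttach
    }
    detach, changeIP, attach := classify(oldList, newList)
    fmt.Println("detach:  ", detach)   // [{EXTNET 3 10 185.1.1.4}]
    fmt.Println("changeIP:", changeIP) // [{VINS 10 5 10.0.0.9}]
    fmt.Println("attach:  ", attach)   // [{VFNIC 7 15 }]
}
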
func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
@@ -697,6 +761,10 @@ func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interfa
req.HPBacked = d.Get("hp_backed").(bool)
}
if d.HasChange("chipset") {
req.Chipset = d.Get("chipset").(string)
}
// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
// If STARTED, we need to stop it before update
var isStopRequired bool
@@ -947,6 +1015,36 @@ func utilityComputeUpdatePciDevices(ctx context.Context, d *schema.ResourceData,
return nil
}
func utilityComputeUpdateLibvirtSettings(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("libvirt_settings")
added := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(added) > 0 {
for _, v := range added {
settingsConv := v.(map[string]interface{})
req := compute.SetNetConfigRequest{
ComputeID: computeId,
MAC: settingsConv["mac"].(string),
TXMode: settingsConv["txmode"].(string),
IOEventFD: settingsConv["ioeventfd"].(string),
EventIDx: settingsConv["event_idx"].(string),
Queues: uint64(settingsConv["queues"].(int)),
RXQueueSize: uint64(settingsConv["rx_queue_size"].(int)),
TXQueueSize: uint64(settingsConv["tx_queue_size"].(int)),
}
_, err := c.CloudBroker().Compute().SetNetConfig(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
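
utilityComputeUpdateLibvirtSettings pushes only the blocks that appear in the new set but not in the old one (newSet.Difference(oldSet)), so editing a libvirt_settings block re-applies it, while removing one from the configuration changes nothing on the platform, exactly as the schema description warns. A map-based stand-in for that "added or changed only" behaviour; the type and values are illustrative:

package main

import "fmt"

type libvirtCfg struct {
    MAC    string
    TXMode string
    Queues int
}

// addedOrChanged mimics newSet.Difference(oldSet): any block that is not
// identical to an old one counts as new and gets pushed.
func addedOrChanged(oldList, newList []libvirtCfg) []libvirtCfg {
    prev := make(map[libvirtCfg]bool, len(oldList))
    for _, c := range oldList {
        prev[c] = true
    }
    var out []libvirtCfg
    for _, c := range newList {
        if !prev[c] {
            out = append(out, c)
        }
    }
    return out
}

func main() {
    oldList := []libvirtCfg{{MAC: "52:54:00:aa:bb:01", TXMode: "iothread", Queues: 2}}
    newList := []libvirtCfg{
        {MAC: "52:54:00:aa:bb:01", TXMode: "iothread", Queues: 4}, // changed -> pushed
        {MAC: "52:54:00:aa:bb:02", TXMode: "timer", Queues: 1},    // added   -> pushed
    }
    fmt.Println(addedOrChanged(oldList, newList))
}
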
func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
@@ -1013,9 +1111,6 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
} else {
req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
_, err := c.CloudBroker().Compute().PFWDel(ctx, req)
if err != nil {
@@ -1037,9 +1132,6 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
if pfwItem["local_port"].(int) != 0 {
req.LocalBasePort = uint64(pfwItem["local_port"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
if err != nil {
@@ -1136,9 +1228,17 @@ func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m
Label: snapshotItem["label"].(string),
}
_, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
if err != nil {
return err
asyncMode, ok := d.GetOk("snapshot_delete_async")
if ok && asyncMode.(bool) {
_, err := c.CloudBroker().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return err
}
} else {
_, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
if err != nil {
return err
}
}
}
}
@@ -1218,9 +1318,6 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
req := compute.CDEjectRequest{
ComputeID: computeId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDEject(ctx, req)
if err != nil {
@@ -1235,9 +1332,6 @@ func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m inter
ComputeID: computeId,
CDROMID: uint64(cdItem["cdrom_id"].(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
if err != nil {

View File

@@ -46,10 +46,6 @@ func utilityComputeGetAuditsCheckPresence(ctx context.Context, d *schema.Resourc
ComputeID: uint64(d.Get("compute_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
computeAudits, err := c.CloudBroker().Compute().GetAudits(ctx, req)
if err != nil {
return nil, err

View File

@@ -69,6 +69,12 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
if ipAddress, ok := d.GetOk("ip_address"); ok {
req.IPAddress = ipAddress.(string)
}
if stackID, ok := d.GetOk("stack_id"); ok {
req.StackID = stackID.(uint64)
}
if imageID, ok := d.GetOk("image_id"); ok {
req.ImageID = imageID.(uint64)
}
if extNetName, ok := d.GetOk("extnet_name"); ok {
req.ExtNetName = extNetName.(string)
}
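
d.Get on a TypeInt attribute always yields an int, so filling the SDK's unsigned request fields goes through uint64(v.(int)); asserting .(uint64) directly would panic at runtime because the dynamic type is int. A short demonstration with a plain interface{} value:

package main

import "fmt"

func main() {
    var v interface{} = 42 // what d.Get returns for a TypeInt attribute

    // Correct: assert the concrete type first, then convert.
    id := uint64(v.(int))
    fmt.Println(id) // 42

    // A direct v.(uint64) assertion would panic; the comma-ok form shows
    // why without crashing.
    if _, ok := v.(uint64); !ok {
        fmt.Println("v does not hold a uint64")
    }
}
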

View File

@@ -46,10 +46,6 @@ func utilityComputePfwListCheckPresence(ctx context.Context, d *schema.ResourceD
ComputeID: uint64(d.Get("compute_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
listPFWs, err := c.CloudBroker().Compute().PFWList(ctx, req)
if err != nil {
return nil, err

View File

@@ -58,7 +58,7 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
req := lb.CreateRequest{
Name: d.Get("name").(string),
RGID: uint64(d.Get("rg_id").(int)),
ExtNetID: uint64(d.Get("extnet_id").(int)),
ExtNetID: int64(d.Get("extnet_id").(int)),
VINSID: uint64(d.Get("vins_id").(int)),
}
if start, ok := d.GetOk("start"); ok {

View File

@@ -475,7 +475,6 @@ func flattenResourceRG(d *schema.ResourceData, rgData *rg.RecordRG) {
d.Set("gid", rgData.GID)
d.Set("rg_name", rgData.Name)
d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits))
d.Set("def_net_type", rgData.DefNetType)
d.Set("description", rgData.Description)
d.Set("register_computes", rgData.RegisterComputes)
d.Set("uniq_pools", rgData.UniqPools)

View File

@@ -77,7 +77,6 @@ type ResgroupUpdateParam struct {
Disk int `json:"maxVDiskCapacity"`
Cpu int `json:"maxCPUCapacity"`
NetTraffic int `json:"maxNetworkPeerTransfer"`
Reason string `json:"reason"`
}
type AccountAclRecord struct {

View File

@@ -117,18 +117,10 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.DefNet = defNetType.(string)
}
if ipcidr, ok := d.GetOk("ipcidr"); ok {
req.IPCIDR = ipcidr.(string)
}
if description, ok := d.GetOk("description"); ok {
req.Description = description.(string)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if extNetId, ok := d.GetOk("ext_net_id"); ok {
req.ExtNetID = uint64(extNetId.(int))
}
@@ -211,7 +203,6 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
d.Get("rg_name").(string), d.Get("account_id").(int))
//c := m.(*controller.ControllerCfg)
rgData, err := utilityResgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty
@@ -301,8 +292,7 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
restore, ok := d.GetOk("restore")
if ok && restore.(bool) {
restoreReq := rg.RestoreRequest{
RGID: rgData.ID,
Reason: "automatic restore of resource by terraform",
RGID: rgData.ID,
}
_, err := c.CloudBroker().RG().Restore(ctx, restoreReq)
@@ -316,8 +306,7 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
enable, ok := d.GetOk("enable")
if ok && enable.(bool) {
enableReq := rg.EnableRequest{
RGID: rgData.ID,
Reason: "automatic enable of resource by terraform",
RGID: rgData.ID,
}
_, err = c.CloudBroker().RG().Enable(ctx, enableReq)
@@ -349,20 +338,12 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
/* NOTE: we do not allow changing the following attributes of an existing RG via terraform:
- def_net_type
- ipcidr
- ext_net_id
- ext_ip
The following code fragment checks if any of these have been changed and generates error.
*/
if ok := d.HasChange("def_net"); ok {
_, newDefNet := d.GetChange("def_net")
if newDefNet.(*schema.Set).Len() == 0 {
return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty")
}
}
for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
for _, attr := range []string{"def_net_type", "ext_ip"} {
attrNew, attrOld := d.GetChange(attr)
if attrNew.(string) != attrOld.(string) {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr))
@@ -444,6 +425,9 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if d.HasChange("uniq_pools") {
uniqPools := d.Get("uniq_pools").([]interface{})
if len(uniqPools) == 0 {
req.ClearUniqPools = true
}
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
@@ -468,6 +452,13 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
}
if d.HasChange("def_net") {
oldDefNet, _ := d.GetChange("def_net")
if oldDefNet.(*schema.Set).Len() > 0 {
_, err := c.CloudBroker().RG().RemoveDefNet(ctx, rg.RemoveDefNetRequest{RGID: rgData.ID})
if err != nil {
return diag.FromErr(err)
}
}
if err := resourceRGSetDefNet(ctx, d, m); err != nil {
return diag.FromErr(err)
}
@@ -520,9 +511,6 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
c := m.(*controller.ControllerCfg)
_, err = c.CloudBroker().RG().Delete(ctx, req)
@@ -559,10 +547,6 @@ func resourceRGAccessGrant(ctx context.Context, d *schema.ResourceData, m interf
Right: right,
}
if reason, ok := access["reason"]; ok {
req.Reason = reason.(string)
}
if _, err := c.CloudBroker().RG().AccessGrant(ctx, req); err != nil {
errs = append(errs, err)
}
@@ -593,9 +577,6 @@ func resourceRGSetDefNet(ctx context.Context, d *schema.ResourceData, m interfac
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
if reason, ok := defNetItem["reason"]; ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().SetDefNet(ctx, req)
return err
@@ -651,10 +632,6 @@ func resourceRGChangeAccess(ctx context.Context, d *schema.ResourceData, m inter
User: user,
}
if reason, ok := deleteItem["reason"]; ok {
reqRevoke.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().AccessRevoke(ctx, reqRevoke)
if err != nil {
return err
@@ -673,10 +650,6 @@ func resourceRGChangeAccess(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}
if reason, ok := addedItem["reason"]; ok {
reqGrant.Reason = reason.(string)
}
_, err := c.CloudBroker().RG().AccessGrant(ctx, reqGrant)
if err != nil {
return err
@@ -749,6 +722,17 @@ func ResourceResgroup() *schema.Resource {
StateContext: schema.ImportStatePassthroughContext,
},
CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChange("def_net") {
diff.SetNewComputed("def_net_id")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_by")
diff.SetNewComputed("updated_time")
}
return nil
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
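
The CustomizeDiff hook added above keeps plans honest: when the def_net block changes, def_net_id is flagged as unknown ("known after apply") instead of showing a stale value, and any change at all marks updated_by and updated_time for recomputation. A minimal standalone resource wired the same way, assuming the terraform-plugin-sdk/v2 module; the attribute names are illustrative, not the provider's full schema:

package main

import (
    "context"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// resourceWithDerivedID invalidates a computed attribute whenever the
// attribute it is derived from changes, so the plan shows it as unknown.
func resourceWithDerivedID() *schema.Resource {
    return &schema.Resource{
        Schema: map[string]*schema.Schema{
            "def_net":    {Type: schema.TypeString, Optional: true},
            "def_net_id": {Type: schema.TypeInt, Computed: true},
        },
        CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) error {
            if diff.HasChange("def_net") {
                // def_net_id will be re-resolved by the platform after apply
                return diff.SetNewComputed("def_net_id")
            }
            return nil
        },
    }
}

func main() { _ = resourceWithDerivedID() }
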

View File

@@ -11,10 +11,7 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
@@ -739,10 +736,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"cpu": {
Type: schema.TypeInt,
@@ -2268,7 +2261,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
// Default: "PRIVATE",
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
@@ -2287,11 +2279,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Description: "User-defined text description of this resource group.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"ext_net_id": {
Type: schema.TypeInt,
Optional: true,
@@ -2337,11 +2324,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -2364,11 +2346,6 @@ func resourceRgSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},

View File

@@ -50,9 +50,6 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
} else {
req.RGID = uint64(d.Get("rg_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
rgData, err := c.CloudBroker().RG().Get(ctx, req)
if err != nil {

View File

@@ -46,10 +46,6 @@ func utilityDataRgUsageCheckPresence(ctx context.Context, d *schema.ResourceData
RGID: uint64(d.Get("rg_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
usage, err := c.CloudBroker().RG().Usage(ctx, req)
if err != nil {
return nil, err

View File

@@ -124,6 +124,7 @@ func flattenUserGetAudits(audits *user.ListAudits) []map[string]interface{} {
"response_time": item.ResponseTime,
"status_code": item.StatusCode,
"time": item.Time,
"guid": item.GUID,
}
res = append(res, temp)
}

View File

@@ -198,6 +198,10 @@ func dataSourceUserGetAuditSchemaMake() map[string]*schema.Schema {
Type: schema.TypeFloat,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
},
},
},

View File

@@ -72,6 +72,7 @@ func flattenVins(d *schema.ResourceData, vinsRecord *vins.RecordVINS) {
d.Set("vnfs", flattenVinsRecordVNFs(vinsRecord.VNFs))
d.Set("vxlan_id", vinsRecord.VXLANID)
d.Set("nat_rule", flattenRuleBlock(vinsRecord.VNFs.NAT.Config.Rules))
d.Set("computes", flattenComputes(vinsRecord.Computes))
}
func flattenVinsData(d *schema.ResourceData, vinsRecord *vins.RecordVINS) {
@@ -109,6 +110,23 @@ func flattenVinsData(d *schema.ResourceData, vinsRecord *vins.RecordVINS) {
d.Set("user_managed", vinsRecord.UserManaged)
d.Set("vnfs", flattenVinsRecordVNFs(vinsRecord.VNFs))
d.Set("vxlan_id", vinsRecord.VXLANID)
d.Set("computes", flattenComputes(vinsRecord.Computes))
}
func flattenLibvirtSettings(libvirtSettings vins.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}
func flattenVinsVNFDev(vd vins.VNFDev) []map[string]interface{} {
@@ -138,6 +156,18 @@ func flattenVinsVNFDev(vd vins.VNFDev) []map[string]interface{} {
return res
}
func flattenComputes(computes []vins.Computes) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computes))
for _, compute := range computes {
tmp := map[string]interface{}{
"id": compute.ID,
"name": compute.Name,
}
res = append(res, tmp)
}
return res
}
func flattenVinsRecordVNFs(rv vins.RecordVNFs) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
@@ -393,25 +423,28 @@ func flattenVinsListInterfaces(i vins.ListInterfaces) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(i))
for _, v := range i {
temp := map[string]interface{}{
"conn_id": v.ConnID,
"conn_type": v.ConnType,
"def_gw": v.DefGW,
"enabled": v.Enabled,
"flipgroup_id": v.FLIPGroupID,
"guid": v.GUID,
"ip_address": v.IPAddress,
"listen_ssh": v.ListenSSH,
"mac": v.MAC,
"name": v.Name,
"net_id": v.NetID,
"net_mask": v.NetMask,
"net_type": v.NetType,
"node_id": v.NodeID,
"pci_slot": v.PCISlot,
"qos": flattenVinsQOS(v.QOS),
"target": v.Target,
"type": v.Type,
"vnfs": v.VNFs,
"conn_id": v.ConnID,
"conn_type": v.ConnType,
"def_gw": v.DefGW,
"enabled": v.Enabled,
"flipgroup_id": v.FLIPGroupID,
"guid": v.GUID,
"ip_address": v.IPAddress,
"listen_ssh": v.ListenSSH,
"mac": v.MAC,
"mtu": v.MTU,
"name": v.Name,
"net_id": v.NetID,
"net_mask": v.NetMask,
"net_type": v.NetType,
"node_id": v.NodeID,
"pci_slot": v.PCISlot,
"bus_number": v.BusNumber,
"qos": flattenVinsQOS(v.QOS),
"target": v.Target,
"type": v.Type,
"vnfs": v.VNFs,
"libvirt_settings": flattenLibvirtSettings(v.LibvirtSettings),
}
res = append(res, temp)
}

View File

@@ -64,9 +64,7 @@ func createVinsInAcc(ctx context.Context, d *schema.ResourceData, m interface{},
if preReservationsNum, ok := d.GetOk("pre_reservations_num"); ok {
req.PreReservationsNum = uint64(preReservationsNum.(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if routesList, ok := d.GetOk("routes"); ok {
var routes []vins.Route
var route vins.Route
@@ -117,9 +115,7 @@ func createVinsInRG(ctx context.Context, d *schema.ResourceData, m interface{},
if preReservationsNum, ok := d.GetOk("pre_reservations_num"); ok {
req.PreReservationsNum = uint64(preReservationsNum.(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if routesList, ok := d.GetOk("routes"); ok {
var routes []vins.Route
var route vins.Route

View File

@@ -343,9 +343,6 @@ func resourceVinsDelete(ctx context.Context, d *schema.ResourceData, m interface
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if _, err := c.CloudBroker().VINS().Delete(ctx, req); err != nil {
return diag.FromErr(err)
@@ -362,9 +359,6 @@ func resourceVinsEnable(ctx context.Context, d *schema.ResourceData, m interface
req := vins.EnableRequest{
VINSID: vinsId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().Enable(ctx, req)
return err
@@ -376,9 +370,6 @@ func resourceVinsDisable(ctx context.Context, d *schema.ResourceData, m interfac
req := vins.DisableRequest{
VINSID: vinsId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().Disable(ctx, req)
return err
@@ -390,9 +381,6 @@ func resourceVinsRestore(ctx context.Context, d *schema.ResourceData, m interfac
req := vins.RestoreRequest{
VINSID: vinsId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().Restore(ctx, req)
return err
@@ -421,9 +409,6 @@ func resourceVinsIpReserve(ctx context.Context, d *schema.ResourceData, m interf
if computeId, ok := ip["compute_id"]; ok {
req.ComputeID = uint64(computeId.(int))
}
if reason, ok := ip["reason"]; ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().IPReserve(ctx, req)
if err != nil {
@@ -459,9 +444,6 @@ func resourceVinsNatRuleAdd(ctx context.Context, d *schema.ResourceData, m inter
if proto, ok := natRule["proto"]; ok {
req.Proto = proto.(string)
}
if reason, ok := natRule["reason"]; ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().NATRuleAdd(ctx, req)
if err != nil {
@@ -505,10 +487,6 @@ func resourceVinsChangeExtNetId(ctx context.Context, d *schema.ResourceData, m i
// there was preexisting external net connection - disconnect ViNS
req := vins.ExtNetDisconnectRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().ExtNetDisconnect(ctx, req)
return err
}
@@ -521,9 +499,6 @@ func resourceVinsChangeExtNetId(ctx context.Context, d *schema.ResourceData, m i
if ip, ok := d.GetOk("ext_ip"); ok && ip != "" {
req.IP = ip.(string)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().ExtNetConnect(ctx, req)
return err
@@ -593,9 +568,6 @@ func resourceVinsChangeIp(ctx context.Context, d *schema.ResourceData, m interfa
if ip["compute_id"].(int) != 0 {
req.ComputeID = uint64(ip["compute_id"].(int))
}
if ip["reason"].(string) != "" {
req.Reason = ip["reason"].(string)
}
_, err := c.CloudBroker().VINS().IPReserve(ctx, req)
if err != nil {
@@ -640,9 +612,6 @@ func resourceVinsChangeNatRule(ctx context.Context, d *schema.ResourceData, m in
VINSID: vinsId,
RuleID: int64(natRule["rule_id"].(int)),
}
if natRule["reason"].(string) != "" {
req.Reason = natRule["reason"].(string)
}
_, err := c.CloudBroker().VINS().NATRuleDel(ctx, req)
errs = append(errs, err)
@@ -738,9 +707,6 @@ func resourceVinsChangeVnfRedeploy(ctx context.Context, d *schema.ResourceData,
_, newRedeploy := d.GetChange("vnfdev_redeploy")
if newRedeploy.(bool) {
req := vins.VNFDevRedeployRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().VNFDevRedeploy(ctx, req)
return err
@@ -757,9 +723,6 @@ func resourceVinsChangeVnfRestart(ctx context.Context, d *schema.ResourceData, m
_, newRestart := d.GetChange("vnfdev_restart")
if newRestart.(bool) {
req := vins.VNFDevRestartRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().VNFDevRestart(ctx, req)
if err != nil {
@@ -778,9 +741,6 @@ func resourceVinsChangeVnfReset(ctx context.Context, d *schema.ResourceData, m i
_, newRestart := d.GetChange("vnfdev_reset")
if newRestart.(bool) {
req := vins.VNFDevResetRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().VNFDevReset(ctx, req)
if err != nil {
@@ -799,9 +759,6 @@ func resourceVinsChangeVnfStartStop(ctx context.Context, d *schema.ResourceData,
_, newStart := d.GetChange("vnfdev_start")
if newStart.(bool) {
req := vins.VNFDevStartRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().VNFDevStart(ctx, req)
if err != nil {
@@ -810,9 +767,6 @@ func resourceVinsChangeVnfStartStop(ctx context.Context, d *schema.ResourceData,
}
req := vins.VNFDevStopRequest{VINSID: vinsId}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().VINS().VNFDevStop(ctx, req)
return err

View File

@@ -12,11 +12,7 @@ func dataSourceVinsSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "vins id",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"vnf_dev": {
Type: schema.TypeList,
Computed: true,
@@ -195,6 +191,47 @@ func dataSourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "mac",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "mtu",
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -224,6 +261,10 @@ func dataSourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "pci slot",
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"qos": {
Type: schema.TypeList,
Computed: true,
@@ -1445,6 +1486,11 @@ func dataSourceVinsListDeletedSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by external IP",
},
"vnf_dev_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by VNF Device id",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
@@ -1796,11 +1842,7 @@ func DataSourceVinsNatRuleListSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Unique ID of the ViNS. If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "reason for action",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -1967,10 +2009,6 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
},
},
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
// Additional parameters from CreateInRG
"rg_id": {
@@ -2032,10 +2070,6 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Optional: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
@@ -2324,6 +2358,47 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "mac",
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "mtu",
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -2353,6 +2428,11 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "pci slot",
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
Description: "bus number",
},
"qos": {
Type: schema.TypeList,
Computed: true,

View File

@@ -56,10 +56,6 @@ func utilityVinsCheckPresence(ctx context.Context, d *schema.ResourceData, m int
req.VINSID = uint64(d.Get("vins_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
vins, err := c.CloudBroker().VINS().Get(ctx, req)
if err != nil {
return nil, err

View File

@@ -69,6 +69,9 @@ func utilityVinsListDeletedCheckPresence(ctx context.Context, d *schema.Resource
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if VNFDevId, ok := d.GetOk("vnf_dev_id"); ok {
req.VNFDevId = uint64(VNFDevId.(int))
}
log.Debugf("utilityVinsListDeletedCheckPresence")
vinsList, err := c.CloudBroker().VINS().ListDeleted(ctx, req)

View File

@@ -52,10 +52,6 @@ func utilityVinsNatRuleListCheckPresence(ctx context.Context, d *schema.Resource
req.VINSID = uint64(d.Get("vins_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
natRuleList, err := c.CloudBroker().VINS().NATRuleList(ctx, req)
if err != nil {
return nil, err