4.7.0
@@ -126,6 +126,11 @@ func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by flipgroup ID",
},
"sort_by": {
Type: schema.TypeString,
Optional: true,
Description: "sort by one of supported fields, format +|-(field)",
},
"page": {
Type: schema.TypeInt,
Optional: true,
@@ -315,9 +315,8 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
userConv := user.(map[string]interface{})

req := account.DeleteUserRequest{
AccountID: accountId,
UserID: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
AccountID: accountId,
UserID: userConv["user_id"].(string),
}
_, err := c.CloudAPI().Account().DeleteUser(ctx, req)
if err != nil {
@@ -381,8 +380,7 @@ func isChangedUser(els []interface{}, el interface{}) bool {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string))) {
return true
}
}
@@ -424,11 +422,6 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Required: true,
},
"recursive_delete": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},
@@ -72,6 +72,10 @@ func utilityAccountFlipGroupsListCheckPresence(ctx context.Context, d *schema.Re
req.ByIP = by_ip.(string)
}

if sortBy, ok := d.GetOk("sort_by"); ok {
req.SortBy = sortBy.(string)
}

if flipgroup_id, ok := d.GetOk("flipgroup_id"); ok {
req.FLIPGroupID = uint64(flipgroup_id.(int))
}
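The schema hunk above also adds a "page" filter for this data source; its request wiring is not part of this excerpt, but it presumably follows the same GetOk pattern. A minimal sketch, assuming the FLIPGroups list request exposes a Page field:

if page, ok := d.GetOk("page"); ok {
	req.Page = uint64(page.(int)) // assumed field name, not shown in this diff
}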
128 internal/service/cloudapi/audit/data_source_audit.go Normal file
@@ -0,0 +1,128 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package audit

import (
"context"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)

func dataSourceAuditRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
auditRec, err := utilityAuditCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}

flattenAudit(d, auditRec)
d.SetId(d.Get("audit_guid").(string))

return nil
}

func DataSourceAudit() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

ReadContext: dataSourceAuditRead,

Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},

Schema: dataSourceAuditSchemaMake(),
}
}

func dataSourceAuditSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"audit_guid": {
Type: schema.TypeString,
Required: true,
Description: "audit guid",
},

"args": {
Type: schema.TypeString,
Computed: true,
},
"call": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"kwargs": {
Type: schema.TypeString,
Computed: true,
},
"remote_addr": {
Type: schema.TypeString,
Computed: true,
},
"responsetime": {
Type: schema.TypeFloat,
Computed: true,
},
"result": {
Type: schema.TypeString,
Computed: true,
},
"status_code": {
Type: schema.TypeInt,
Computed: true,
},
"tags": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
},
"timestamp_end": {
Type: schema.TypeFloat,
Computed: true,
},
"user": {
Type: schema.TypeString,
Computed: true,
},
}
}
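For the new audit data source to be reachable from Terraform configurations it still has to be registered in the provider's data-source map; that wiring is not part of this excerpt. A minimal sketch of what it typically looks like (the map location and the "decort_audit" key are assumptions):

// in the provider's DataSourcesMap (assumed location and key name)
"decort_audit": audit.DataSourceAudit(),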
56 internal/service/cloudapi/audit/flattens.go Normal file
@@ -0,0 +1,56 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package audit

import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
)

func flattenAudit(d *schema.ResourceData, au *audit.RecordAudit) {
log.Debugf("flattenAudit: decoded audit guid %s", d.Get("audit_guid").(string))

d.Set("args", au.Arguments)
d.Set("call", au.Call)
d.Set("guid", au.GUID)
d.Set("kwargs", au.Kwargs)
d.Set("remote_addr", au.RemoteAddr)
d.Set("responsetime", au.ResponseTime)
d.Set("result", au.Result)
d.Set("status_code", au.StatusCode)
d.Set("tags", au.Tags)
d.Set("timestamp", au.Timestamp)
d.Set("timestamp_end", au.TimestampEnd)
d.Set("user", au.User)
}
62 internal/service/cloudapi/audit/utility_audit.go Normal file
@@ -0,0 +1,62 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Sergey Kisil, <svkisil@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package audit

import (
"context"

log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityAuditCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*audit.RecordAudit, error) {
c := m.(*controller.ControllerCfg)
req := audit.GetRequest{}

if d.Id() != "" {
req.AuditGuid = d.Id()
} else {
req.AuditGuid = d.Get("audit_guid").(string)
}

log.Debugf("utilityStackCheckPresence: load audit")
auditInfo, err := c.CloudAPI().Audit().Get(ctx, req)
if err != nil {
return nil, err
}

return auditInfo, nil
}
@@ -394,7 +394,7 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
"driver": {
Type: schema.TypeString,
Required: true,
Description: "compute driver like a KVM_X86, KVM_PPC, etc.",
Description: "compute driver like a KVM_X86, etc.",
},
///4.4.0
"sep_id": {
@@ -238,12 +238,6 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}

_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)

@@ -362,7 +356,6 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}

c := m.(*controller.ControllerCfg)

@@ -437,12 +430,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "Reason for deletion",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,

@@ -677,27 +664,27 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Type: schema.TypeInt,
Computed: true,
},
"pool_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"self_volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"storage_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Type: schema.TypeString,
Computed: true,
},
},

@@ -177,7 +177,6 @@ func resourceDiskReplicationDelete(ctx context.Context, d *schema.ResourceData,
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}

log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d", diskId)

@@ -243,11 +242,6 @@ func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Delete disk permanently",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for disk deletion",
},
"replica_disk_id": {
Type: schema.TypeInt,
Computed: true,
153 internal/service/cloudapi/dpdknet/data_source_dpdk.go Normal file
@@ -0,0 +1,153 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package dpdknet
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceDPDKNetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
dpdk, err := utilityDPDKNetCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
|
||||
flattenDPDKNet(d, dpdk)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceDPDKNetSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"dpdk_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "The unique ID of the subscriber-owner of the DPDK network",
|
||||
},
|
||||
"account_access": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of accounts with access",
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Created time",
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Description of DPDK network",
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "ID of the grid (platform)",
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "DPDK network ID on the storage side",
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Name of network",
|
||||
},
|
||||
"rg_access": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of resource groups with access",
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "DPDK network status",
|
||||
},
|
||||
"ovs_bridge": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "OVS bridge in which interfaces for computers created",
|
||||
},
|
||||
"vlan_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "vlan ID",
|
||||
},
|
||||
"compute_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "Compute IDs which uses this DPDK network",
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Updated time",
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceDPDKNet() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceDPDKNetRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceDPDKNetSchemaMake(),
|
||||
}
|
||||
}
|
||||
212 internal/service/cloudapi/dpdknet/data_source_dpdk_list.go Normal file
@@ -0,0 +1,212 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package dpdknet
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
)
|
||||
|
||||
func dataSourceDPDKNetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||
dpdkList, err := utilityDPDKNetListCheckPresence(ctx, d, m)
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
id := uuid.New()
|
||||
d.SetId(id.String())
|
||||
d.Set("items", flattenDPDKNetList(dpdkList))
|
||||
d.Set("entry_count", dpdkList.EntryCount)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dataSourceDPDKNetListSchemaMake() map[string]*schema.Schema {
|
||||
res := map[string]*schema.Schema{
|
||||
"by_id": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Find by ID",
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Find by GID",
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Find by name",
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Find by description",
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Find by status",
|
||||
},
|
||||
"compute_ids": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "Find by compute IDs",
|
||||
},
|
||||
"sort_by": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "sort by one of supported fields, format +|-(field)",
|
||||
},
|
||||
"page": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Page number",
|
||||
},
|
||||
"size": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "Page size",
|
||||
},
|
||||
"items": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"dpdk_id": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "The unique ID of the subscriber-owner of the DPDK network",
|
||||
},
|
||||
"account_access": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of accounts with access",
|
||||
},
|
||||
"created_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Created time",
|
||||
},
|
||||
"desc": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Description of DPDK network",
|
||||
},
|
||||
"gid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "ID of the grid (platform)",
|
||||
},
|
||||
"guid": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "DPDK network ID on the storage side",
|
||||
},
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "Name of network",
|
||||
},
|
||||
"rg_access": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "List of resource groups with access",
|
||||
},
|
||||
"status": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "DPDK network status",
|
||||
},
|
||||
"ovs_bridge": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "OVS bridge in which interfaces for computers created",
|
||||
},
|
||||
"vlan_id": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "vlan ID",
|
||||
},
|
||||
"compute_ids": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
},
|
||||
Description: "Compute IDs which uses this DPDK network",
|
||||
},
|
||||
"updated_time": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Updated time",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"entry_count": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func DataSourceDPDKNetList() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
SchemaVersion: 1,
|
||||
|
||||
ReadContext: dataSourceDPDKNetListRead,
|
||||
|
||||
Timeouts: &schema.ResourceTimeout{
|
||||
Read: &constants.Timeout30s,
|
||||
Default: &constants.Timeout60s,
|
||||
},
|
||||
|
||||
Schema: dataSourceDPDKNetListSchemaMake(),
|
||||
}
|
||||
}
|
||||
44 internal/service/cloudapi/dpdknet/flattens.go Normal file
@@ -0,0 +1,44 @@
|
||||
package dpdknet
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
|
||||
)
|
||||
|
||||
func flattenDPDKNet(d *schema.ResourceData, dpdk *dpdk.RecordDPDKNet) {
|
||||
d.Set("dpdk_id", dpdk.ID)
|
||||
d.Set("account_access", dpdk.AccountAccess)
|
||||
d.Set("created_time", dpdk.CreatedTime)
|
||||
d.Set("desc", dpdk.Description)
|
||||
d.Set("gid", dpdk.GID)
|
||||
d.Set("guid", dpdk.GUID)
|
||||
d.Set("name", dpdk.Name)
|
||||
d.Set("rg_access", dpdk.RGAccess)
|
||||
d.Set("status", dpdk.Status)
|
||||
d.Set("ovs_bridge", dpdk.OVSBridge)
|
||||
d.Set("vlan_id", dpdk.VlanID)
|
||||
d.Set("compute_ids", dpdk.ComputeIDs)
|
||||
d.Set("updated_time", dpdk.UpdatedTime)
|
||||
}
|
||||
|
||||
func flattenDPDKNetList(list *dpdk.ListDPDKNet) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, 0, len(list.Data))
|
||||
for _, dpdk := range list.Data {
|
||||
temp := map[string]interface{}{
|
||||
"dpdk_id": dpdk.ID,
|
||||
"account_access": dpdk.AccountAccess,
|
||||
"desc": dpdk.Description,
|
||||
"gid": dpdk.GID,
|
||||
"guid": dpdk.GUID,
|
||||
"name": dpdk.Name,
|
||||
"rg_access": dpdk.RGAccess,
|
||||
"status": dpdk.Status,
|
||||
"ovs_bridge": dpdk.OVSBridge,
|
||||
"vlan_id": dpdk.VlanID,
|
||||
"compute_ids": dpdk.ComputeIDs,
|
||||
"updated_time": dpdk.UpdatedTime,
|
||||
}
|
||||
res = append(res, temp)
|
||||
}
|
||||
return res
|
||||
}
|
||||
68 internal/service/cloudapi/dpdknet/utility_dpdk.go Normal file
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package dpdknet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
)
|
||||
|
||||
func utilityDPDKNetCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.RecordDPDKNet, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := dpdk.GetRequest{}
|
||||
|
||||
if d.Get("dpdk_id") != nil {
|
||||
if d.Get("dpdk_id").(int) == 0 {
|
||||
id, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req.DPDKID = id
|
||||
} else {
|
||||
req.DPDKID = uint64(d.Get("dpdk_id").(int))
|
||||
}
|
||||
} else {
|
||||
id, _ := strconv.ParseUint(d.Id(), 10, 64)
|
||||
req.DPDKID = id
|
||||
}
|
||||
|
||||
log.Debugf("utilityDPDKCheckPresence: get DPDK network")
|
||||
dpdk, err := c.CloudAPI().DPDKNet().Get(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dpdk, nil
|
||||
}
|
||||
87 internal/service/cloudapi/dpdknet/utility_dpdk_list.go Normal file
@@ -0,0 +1,87 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package dpdknet
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
dpdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func utilityDPDKNetListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*dpdk.ListDPDKNet, error) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := dpdk.ListRequest{}
|
||||
|
||||
if byID, ok := d.GetOk("by_id"); ok {
|
||||
req.ByID = uint64(byID.(int))
|
||||
}
|
||||
if GID, ok := d.GetOk("gid"); ok {
|
||||
req.GID = uint64(GID.(int))
|
||||
}
|
||||
if name, ok := d.GetOk("name"); ok {
|
||||
req.Name = name.(string)
|
||||
}
|
||||
if desc, ok := d.GetOk("description"); ok {
|
||||
req.Description = desc.(string)
|
||||
}
|
||||
if status, ok := d.GetOk("status"); ok {
|
||||
req.Status = status.(string)
|
||||
}
|
||||
if computeIDs, ok := d.GetOk("compute_ids"); ok {
|
||||
IDs := computeIDs.([]interface{})
|
||||
for _, ID := range IDs {
|
||||
req.ComputeIDs = append(req.ComputeIDs, uint64(ID.(int)))
|
||||
}
|
||||
}
|
||||
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||
req.SortBy = sortBy.(string)
|
||||
}
|
||||
if page, ok := d.GetOk("page"); ok {
|
||||
req.Page = uint64(page.(int))
|
||||
}
|
||||
if size, ok := d.GetOk("size"); ok {
|
||||
req.Size = uint64(size.(int))
|
||||
}
|
||||
|
||||
log.Debugf("utilityDPDKListCheckPresence: load DPDK network list")
|
||||
dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dpdkList, nil
|
||||
}
|
||||
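Like the audit data source above, the new dpdknet data sources defined in the files above still need entries in the provider's data-source map; that wiring is not shown in this commit excerpt. A minimal sketch, assuming the usual map location and guessed key names:

// in the provider's DataSourcesMap (assumed location; key names are assumptions)
"decort_dpdknet":      dpdknet.DataSourceDPDKNet(),
"decort_dpdknet_list": dpdknet.DataSourceDPDKNetList(),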
@@ -124,7 +124,7 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
Type: schema.TypeString,
},
Description: "client_ids",
},

@@ -189,7 +189,7 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
if cliensId, ok := d.GetOk("client_ids"); ok {
cliensIds := cliensId.([]interface{})
for _, elem := range cliensIds {
req.ClientIDs = append(req.ClientIDs, uint64(elem.(int)))
req.ClientIDs = append(req.ClientIDs, (elem.(string)))
}
}
if status, ok := d.GetOk("status"); ok {
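Because the element type of client_ids changes from TypeInt to TypeString, existing configurations presumably need to quote the IDs, and the type assertion in the loop changes to match. A sketch of the before/after shape:

// before (TypeInt elements):   client_ids = [123, 456]
// after  (TypeString elements): client_ids = ["123", "456"]
for _, elem := range cliensIds {
	req.ClientIDs = append(req.ClientIDs, elem.(string)) // each element now holds a string
}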
@@ -132,6 +132,11 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "page size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Description: "find by enabled True or False",
},
"items": {
Type: schema.TypeList,
Computed: true,
@@ -129,8 +129,8 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "binary architecture of this image, one of X86_64",
}

sch["drivers"] = &schema.Schema{

@@ -141,13 +141,6 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
},
}

sch["permanently"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
}

sch["network_interface_naming"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -1,67 +1,60 @@
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package image
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func resourceImageVirtualSchemaMake(sch map[string]*schema.Schema) map[string]*schema.Schema {
|
||||
delete(sch, "show_all")
|
||||
sch["name"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "Name of the rescue disk",
|
||||
}
|
||||
|
||||
sch["link_to"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of real image to link this virtual image to upon creation",
|
||||
}
|
||||
|
||||
sch["permanently"] = &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
Description: "whether to completely delete the image",
|
||||
}
|
||||
|
||||
sch["image_id"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Image id",
|
||||
}
|
||||
|
||||
return sch
|
||||
}
|
||||
/*
|
||||
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||
Authors:
|
||||
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||
Kasim Baybikov, <kmbaybikov@basistech.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||
Orchestration Technology) with Terraform by Hashicorp.
|
||||
|
||||
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||
|
||||
Please see README.md to learn where to place source code so that it
|
||||
builds seamlessly.
|
||||
|
||||
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||
*/
|
||||
|
||||
package image
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
)
|
||||
|
||||
func resourceImageVirtualSchemaMake(sch map[string]*schema.Schema) map[string]*schema.Schema {
|
||||
delete(sch, "show_all")
|
||||
sch["name"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "Name of the rescue disk",
|
||||
}
|
||||
|
||||
sch["link_to"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
Description: "ID of real image to link this virtual image to upon creation",
|
||||
}
|
||||
|
||||
sch["image_id"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
Description: "Image id",
|
||||
}
|
||||
|
||||
return sch
|
||||
}
|
||||
|
||||
@@ -157,10 +157,6 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
ImageID: uint64(d.Get("image_id").(int)),
}

if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}

_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

@@ -194,10 +194,6 @@ func resourceImageFromBlankComputeDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}

if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}

_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

@@ -352,12 +348,6 @@ func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},

"image_id": {
Type: schema.TypeInt,

@@ -201,10 +201,6 @@ func resourceImageFromPlatformDiskDelete(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)),
}

if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}

_, err = c.CloudAPI().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)

@@ -302,8 +298,8 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
"architecture": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
ValidateFunc: validation.StringInSlice([]string{"X86_64"}, true),
Description: "binary architecture of this image, one of X86_64",
},

"username": {

@@ -338,11 +334,10 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
},
"drivers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Required: true,
Elem: &schema.Schema{
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",

@@ -365,12 +360,6 @@ func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "create an image in async/sync mode",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the image",
},

"image_id": {
Type: schema.TypeInt,
@@ -46,12 +46,12 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
c := m.(*controller.ControllerCfg)
req := image.ListRequest{}

if sep_id, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sep_id.(int))
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}

if by_id, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(by_id.(int))
if byID, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byID.(int))
}

if name, ok := d.GetOk("name"); ok {

@@ -66,31 +66,31 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
req.Architecture = architecture.(string)
}

if type_image, ok := d.GetOk("type_image"); ok {
req.TypeImage = type_image.(string)
if typeImage, ok := d.GetOk("type_image"); ok {
req.TypeImage = typeImage.(string)
}

if image_size, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(image_size.(int))
if imageSize, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(imageSize.(int))
}

if sep_name, ok := d.GetOk("sep_name"); ok {
req.SEPName = sep_name.(string)
if sepName, ok := d.GetOk("sep_name"); ok {
req.SEPName = sepName.(string)
}

if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}

if public, ok := d.GetOk("public"); ok {
if public, ok := d.GetOkExists("public"); ok {
req.Public = public.(bool)
}

if hot_resize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hot_resize.(bool)
if hotResize, ok := d.GetOkExists("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}

if bootable, ok := d.GetOk("bootable"); ok {
if bootable, ok := d.GetOkExists("bootable"); ok {
req.Bootable = bootable.(bool)
}

@@ -104,6 +104,9 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if enabled, ok := d.GetOkExists("enabled"); ok {
req.Enabled = enabled.(bool)
}

log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudAPI().Image().List(ctx, req)
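The switch from d.GetOk to d.GetOkExists for the boolean filters ("public", "hot_resize", "bootable", "enabled") matters because GetOk treats the zero value as "not set": a user-supplied false would never reach the request. A minimal sketch of the difference:

// GetOk: ok is false whenever the value equals the zero value, so `public = false` is silently dropped.
if public, ok := d.GetOk("public"); ok {
	req.Public = public.(bool) // only ever runs for true
}
// GetOkExists: ok reports whether the key was set at all, so an explicit false is forwarded too.
if public, ok := d.GetOkExists("public"); ok {
	req.Public = public.(bool)
}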
@@ -103,16 +103,17 @@ func dataSourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{
}

d.Set("kubeconfig", kubeconfig)
if cluster.LBID != 0 {
getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudAPI().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
}

getLbReq := lb.GetRequest{LBID: cluster.LBID}
lb, err := c.CloudAPI().LB().Get(ctx, getLbReq)
if err != nil {
return diag.FromErr(err)
d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)
}

d.Set("extnet_id", lb.ExtNetID)
d.Set("lb_ip", lb.PrimaryNode.FrontendIP)

flattenK8sData(d, *cluster, masterComputeList, workersComputeList)
return nil
}

@@ -254,6 +254,20 @@ func flattenK8sData(d *schema.ResourceData, cluster k8s.RecordK8S, masters []com
d.Set("tech_status", cluster.TechStatus)
d.Set("updated_by", cluster.UpdatedBy)
d.Set("updated_time", cluster.UpdatedTime)
d.Set("highly_available_lb", cluster.HighlyAvailableLB)
d.Set("address_vip", flattenAddressVIP(cluster.AddressVIP))
d.Set("extnet_only", cluster.ExtnetOnly)
d.Set("with_lb", cluster.WithLB)
}

func flattenAddressVIP(addressVIP k8s.K8SAddressVIP) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"backend_ip": addressVIP.BackendIP,
"frontend_ip": addressVIP.FrontendIP,
}
res = append(res, temp)
return res
}

func flattenServiceAccount(serviceAccount k8s.RecordServiceAccount) []map[string]interface{} {
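flattenAddressVIP wraps the single AddressVIP struct in a one-element list, the usual way to expose a nested object through the SDK. The matching schema entry is not part of this excerpt; it presumably looks roughly like this sketch:

"address_vip": {
	Type:     schema.TypeList,
	Computed: true,
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			"backend_ip":  {Type: schema.TypeString, Computed: true},
			"frontend_ip": {Type: schema.TypeString, Computed: true},
		},
	},
},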
@@ -153,6 +153,11 @@ func workersSchemaMake() map[string]*schema.Schema {
Schema: detailedInfoSchemaMake(),
},
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"guid": {
Type: schema.TypeString,
Computed: true,

@@ -202,8 +202,13 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
if oidcCertificate, ok := d.GetOk("oidc_cert"); ok {
createReq.OidcCertificate = oidcCertificate.(string)
}

///

if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}

createReq.ExtNetOnly = d.Get("extnet_only").(bool)

if extNet, ok := d.GetOk("extnet_id"); ok {

@@ -745,6 +750,12 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Description: "insert ssl certificate in x509 pem format",
},

"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},

"desc": {
Type: schema.TypeString,
Optional: true,

@@ -124,6 +124,10 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
createReq.MasterSEPID = uint64(sepId.(int))
}

if chipset, ok := d.GetOk("chipset"); ok {
createReq.Chipset = chipset.(string)
}

if sepPool, ok := d.GetOk("sep_pool"); ok {
createReq.MasterSEPPool = sepPool.(string)
}

@@ -532,10 +536,10 @@ func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interfac
oldVal, newVal := d.GetChange("num")

if oldVal.(int) > newVal.(int) {
ids := make([]string, 0)
ids := make([]uint64, 0)
for i := oldVal.(int) - 1; i >= newVal.(int); i-- {
id := cluster.K8SGroups.Masters.DetailedInfo[i].ID
ids = append(ids, strconv.FormatUint(id, 10))
ids = append(ids, id)
}

req := k8s.DeleteMasterFromGroupRequest{

@@ -656,6 +660,11 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
),
Description: "Node RAM in MB.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,

@@ -70,6 +70,7 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
Chipset: d.Get("chipset").(string),
}

labels, _ := d.Get("labels").([]interface{})

@@ -168,6 +169,7 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
Num: uint64(newNum) - wg.Num,
Chipset: d.Get("chipset").(string),
}

_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)

@@ -253,6 +255,12 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
Description: "Number of worker nodes to create.",
},

"chipset": {
Type: schema.TypeString,
Optional: true,
Description: "Type of the emulated system.",
},

"cpu": {
Type: schema.TypeInt,
Optional: true,

@@ -84,6 +84,7 @@ func updateNum(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.Re
K8SID: cluster.ID,
WorkersGroupID: uint64(wgNew["id"].(int)),
Num: uint64(newNum - oldNum),
Chipset: wgNew["chipset"].(string),
}

_, err := c.CloudAPI().K8S().WorkerAdd(ctx, req)

@@ -145,6 +146,7 @@ func addWGs(ctx context.Context, c *controller.ControllerCfg, cluster *k8s.Recor
WorkerCPU: uint64(found_wg["cpu"].(int)),
WorkerRAM: uint64(found_wg["ram"].(int)),
WorkerDisk: uint64(found_wg["disk"].(int)),
Chipset: found_wg["chipset"].(string),
}

labels, _ := found_wg["labels"].([]interface{})
@@ -260,6 +260,10 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,

@@ -475,6 +479,10 @@ func computeQOSSchemaMake() map[string]*schema.Schema {

func computeInterfacesSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,

@@ -511,6 +519,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,

@@ -542,6 +554,13 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
Schema: computeQOSSchemaMake(),
},
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: computeLibvirtSettingsSchemaMake(),
},
},
"target": {
Type: schema.TypeString,
Computed: true,

@@ -559,6 +578,40 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
},
}
}

func computeLibvirtSettingsSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
}
}

func computeOsUsersSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"guid": {

@@ -650,6 +703,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"boot_order": {
Type: schema.TypeList,
Computed: true,
@@ -64,6 +64,10 @@ func dataSourceComputeListRead(ctx context.Context, d *schema.ResourceData, m in

func computeDisksSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,

@@ -136,6 +140,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"chipset": {
Type: schema.TypeString,
Computed: true,
},
"clones": {
Type: schema.TypeList,
Computed: true,

@@ -47,8 +47,9 @@ func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
"bus_number": disk.BusNumber,
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}

@@ -69,30 +70,49 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
res := make([]map[string]interface{}, 0, len(interfaces))
for _, interfaceItem := range interfaces {
temp := map[string]interface{}{
"conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress,
"listen_ssh": interfaceItem.ListenSSH,
"mac": interfaceItem.MAC,
"name": interfaceItem.Name,
"net_id": interfaceItem.NetID,
"netmask": interfaceItem.NetMask,
"net_type": interfaceItem.NetType,
"node_id": interfaceItem.NodeID,
"pci_slot": interfaceItem.PCISlot,
"qos": flattenQOS(interfaceItem.QOS),
"target": interfaceItem.Target,
"type": interfaceItem.Type,
"vnfs": interfaceItem.VNFs,
"bus_number": interfaceItem.BusNumber,
"conn_id": interfaceItem.ConnID,
"conn_type": interfaceItem.ConnType,
"def_gw": interfaceItem.DefGW,
"enabled": interfaceItem.Enabled,
"flip_group_id": interfaceItem.FLIPGroupID,
"guid": interfaceItem.GUID,
"ip_address": interfaceItem.IPAddress,
"listen_ssh": interfaceItem.ListenSSH,
"mac": interfaceItem.MAC,
"mtu": interfaceItem.MTU,
"name": interfaceItem.Name,
"net_id": interfaceItem.NetID,
"netmask": interfaceItem.NetMask,
"net_type": interfaceItem.NetType,
"node_id": interfaceItem.NodeID,
"pci_slot": interfaceItem.PCISlot,
"qos": flattenQOS(interfaceItem.QOS),
"target": interfaceItem.Target,
"type": interfaceItem.Type,
"vnfs": interfaceItem.VNFs,
"libvirt_settings": flattenLibvirtSettings(interfaceItem.LibvirtSettings),
}
res = append(res, temp)
}
return res
}

func flattenLibvirtSettings(libvirtSettings compute.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}

func flattenSnapSets(snapSets compute.ListSnapSets) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(snapSets))
for _, snapSet := range snapSets {

@@ -165,6 +185,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"arch": compute.Architecture,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"chipset": compute.Chipset,
"cd_image_id": compute.CdImageId,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,

@@ -348,6 +369,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute, pc
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool)
d.Set("chipset", computeRec.Chipset)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)

@@ -503,6 +525,7 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
"acl": string(acl),
"account_id": disk.AccountID,
"boot_partition": disk.BootPartition,
"bus_number": disk.BusNumber,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"description": disk.Description,

@@ -590,6 +613,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("anti_affinity_rules", flattenListRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Architecture)
d.Set("chipset", computeRec.Chipset)
d.Set("boot_order", computeRec.BootOrder)
d.Set("bootdisk_size", computeRec.BootDiskSize)
d.Set("cd_image_id", computeRec.CdImageId)
@@ -128,7 +128,7 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: statefuncs.StateFuncToUpper,
|
||||
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC"}, false), // observe case while validating
|
||||
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC", "DPDK"}, false), // observe case while validating
|
||||
Description: "Type of the network for this connection, either EXTNET or VINS.",
|
||||
},
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ func resourceComputeResourceV1() *schema.Resource {
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
StateFunc: statefuncs.StateFuncToUpper,
|
||||
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
|
||||
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
|
||||
Description: "Hardware architecture of this compute instance.",
|
||||
},
|
||||
"cpu": {
|
||||
@@ -569,7 +569,7 @@ func resourceComputeResourceV2() *schema.Resource {
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
StateFunc: statefuncs.StateFuncToUpper,
|
||||
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
|
||||
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
|
||||
Description: "Hardware architecture of this compute instance.",
|
||||
},
|
||||
"cpu": {
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
|
||||
@@ -147,6 +148,23 @@ func existVFPoolId(ctx context.Context, m interface{}, id int) (int, bool) {
|
||||
return id, false
|
||||
}
|
||||
|
||||
func existDPDKNetId(ctx context.Context, m interface{}, id int) (int, bool) {
|
||||
c := m.(*controller.ControllerCfg)
|
||||
req := dpdknet.ListRequest{ByID: uint64(id)}
|
||||
|
||||
dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, req)
|
||||
if err != nil {
|
||||
log.Debugf("Unable to retrieve vfpool list, %s", err)
|
||||
return id, false
|
||||
}
|
||||
|
||||
if len(dpdkList.Data) == 1 {
|
||||
return 0, true
|
||||
}
|
||||
|
||||
return id, false
|
||||
}
|
||||
|
||||
func isMoreThanOneDisksTypeB(ctx context.Context, disks interface{}) bool {
|
||||
count := 0
|
||||
|
||||
|
||||
@@ -42,7 +42,6 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
|
||||
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||
@@ -60,7 +59,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
|
||||
c := m.(*controller.ControllerCfg)
|
||||
createReqX86 := kvmx86.CreateRequest{}
|
||||
createReqPPC := kvmppc.CreateRequest{}
|
||||
|
||||
hasRG, err := existRgID(ctx, d, m)
|
||||
if err != nil {
|
||||
@@ -101,12 +99,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
return diag.Errorf("resourceComputeCreate: can't create compute because extnet ID %d is not allowed or does not exist", extNetId)
|
||||
}
|
||||
case "VFNIC":
|
||||
if d.Get("driver").(string) == "KVM_PPC" {
|
||||
return diag.Errorf("resourceComputeCreate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
|
||||
}
|
||||
if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
|
||||
return diag.Errorf("resourceComputeCreate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
|
||||
}
|
||||
case "DPDK":
|
||||
if dpdkId, ok := existDPDKNetId(ctx, m, networkData["net_id"].(int)); !ok {
|
||||
return diag.Errorf("resourceComputeCreate: can't create compute because DPDK ID %d is not allowed or does not exist", dpdkId)
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
@@ -115,32 +114,26 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
|
||||
argVal, ok := d.GetOk("description")
|
||||
if ok {
|
||||
createReqPPC.Description = argVal.(string)
|
||||
createReqX86.Description = argVal.(string)
|
||||
}
|
||||
|
||||
if sepID, ok := d.GetOk("sep_id"); ok {
|
||||
createReqPPC.SEPID = uint64(sepID.(int))
|
||||
createReqX86.SepID = uint64(sepID.(int))
|
||||
}
|
||||
|
||||
if pool, ok := d.GetOk("pool"); ok {
|
||||
createReqPPC.Pool = pool.(string)
|
||||
createReqX86.Pool = pool.(string)
|
||||
}
|
||||
|
||||
if ipaType, ok := d.GetOk("ipa_type"); ok {
|
||||
createReqPPC.IPAType = ipaType.(string)
|
||||
createReqX86.IPAType = ipaType.(string)
|
||||
}
|
||||
|
||||
if bootSize, ok := d.GetOk("boot_disk_size"); ok {
|
||||
createReqPPC.BootDisk = uint64(bootSize.(int))
|
||||
createReqX86.BootDisk = uint64(bootSize.(int))
|
||||
}
|
||||
|
||||
if IS, ok := d.GetOk("is"); ok {
|
||||
createReqPPC.IS = IS.(string)
|
||||
createReqX86.IS = IS.(string)
|
||||
}
|
||||
|
||||
@@ -181,45 +174,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
}
|
||||
}
|
||||
|
||||
createReqPPC.Interfaces = make([]kvmppc.Interface, 0)
|
||||
|
||||
if networks, ok := d.GetOk("network"); ok {
|
||||
if networks.(*schema.Set).Len() > 0 {
|
||||
ns := networks.(*schema.Set).List()
|
||||
sort.Slice(ns, func(i, j int) bool {
|
||||
weightI := ns[i].(map[string]interface{})["weight"].(int)
|
||||
weightJ := ns[j].(map[string]interface{})["weight"].(int)
|
||||
if weightI == 0 {
|
||||
return false
|
||||
}
|
||||
if weightJ == 0 {
|
||||
return true
|
||||
}
|
||||
return weightI < weightJ
|
||||
})
|
||||
interfaces := make([]kvmppc.Interface, 0)
|
||||
for _, elem := range ns {
|
||||
netInterfaceVal := elem.(map[string]interface{})
|
||||
reqInterface := kvmppc.Interface{
|
||||
NetType: netInterfaceVal["net_type"].(string),
|
||||
NetID: uint64(netInterfaceVal["net_id"].(int)),
|
||||
}
|
||||
|
||||
ipaddr, ipSet := netInterfaceVal["ip_address"]
|
||||
if ipSet {
|
||||
reqInterface.IPAddr = ipaddr.(string)
|
||||
}
|
||||
|
||||
interfaces = append(interfaces, reqInterface)
|
||||
}
|
||||
|
||||
createReqPPC.Interfaces = interfaces
|
||||
}
|
||||
}
|
||||
|
||||
if disks, ok := d.GetOk("disks"); ok {
|
||||
disksX86 := make([]kvmx86.DataDisk, 0)
|
||||
disksPPC := make([]kvmppc.DataDisk, 0)
|
||||
|
||||
for _, elem := range disks.([]interface{}) {
|
||||
diskVal := elem.(map[string]interface{})
|
||||
@@ -244,96 +200,58 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
|
||||
|
||||
createReqX86.DataDisks = disksX86
|
||||
|
||||
for _, elem := range disks.([]interface{}) {
|
||||
diskVal := elem.(map[string]interface{})
|
||||
reqDataDisk := kvmppc.DataDisk{
|
||||
DiskName: diskVal["disk_name"].(string),
|
||||
Size: uint64(diskVal["size"].(int)),
|
||||
}
|
||||
if sepId, ok := diskVal["sep_id"]; ok {
|
||||
reqDataDisk.SepID = uint64(sepId.(int))
|
||||
}
|
||||
if pool, ok := diskVal["pool"]; ok {
|
||||
reqDataDisk.Pool = pool.(string)
|
||||
}
|
||||
if desc, ok := diskVal["desc"]; ok {
|
||||
reqDataDisk.Description = desc.(string)
|
||||
}
|
||||
if imageID, ok := diskVal["image_id"]; ok {
|
||||
reqDataDisk.ImageID = uint64(imageID.(int))
|
||||
}
|
||||
disksPPC = append(disksPPC, reqDataDisk)
|
||||
}
|
||||
|
||||
createReqPPC.DataDisks = disksPPC
|
||||
}
|
||||
|
||||
argVal, ok = d.GetOk("cloud_init")
|
||||
if ok {
|
||||
userdata := argVal.(string)
|
||||
if userdata != "" && userdata != "applied" {
|
||||
createReqPPC.Userdata = strings.TrimSpace(userdata)
|
||||
createReqX86.Userdata = strings.TrimSpace(userdata)
|
||||
}
|
||||
}
|
||||
|
||||
var computeId uint64
|
||||
driver := d.Get("driver").(string)
|
||||
if driver == "KVM_PPC" {
|
||||
createReqPPC.RGID = uint64(d.Get("rg_id").(int))
|
||||
createReqPPC.Name = d.Get("name").(string)
|
||||
createReqPPC.CPU = uint64(d.Get("cpu").(int))
|
||||
createReqPPC.RAM = uint64(d.Get("ram").(int))
|
||||
createReqPPC.ImageID = uint64(d.Get("image_id").(int))
|
||||
|
||||
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
|
||||
apiResp, err := c.CloudAPI().KVMPPC().Create(ctx, createReqPPC)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
createReqX86.RGID = uint64(d.Get("rg_id").(int))
|
||||
createReqX86.Name = d.Get("name").(string)
|
||||
createReqX86.CPU = uint64(d.Get("cpu").(int))
|
||||
createReqX86.RAM = uint64(d.Get("ram").(int))
|
||||
|
||||
d.SetId(strconv.FormatUint(apiResp, 10))
|
||||
computeId = apiResp
|
||||
} else {
|
||||
createReqX86.RGID = uint64(d.Get("rg_id").(int))
|
||||
createReqX86.Name = d.Get("name").(string)
|
||||
createReqX86.CPU = uint64(d.Get("cpu").(int))
|
||||
createReqX86.RAM = uint64(d.Get("ram").(int))
|
||||
createReqX86.Driver = driver
|
||||
|
||||
createReqX86.Driver = driver
|
||||
|
||||
if image, ok := d.GetOk("image_id"); ok {
|
||||
createReqX86.ImageID = uint64(image.(int))
|
||||
}
|
||||
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
|
||||
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
|
||||
}
|
||||
|
||||
if custom_fields, ok := d.GetOk("custom_fields"); ok {
|
||||
val := custom_fields.(string)
|
||||
val = strings.ReplaceAll(val, "\\", "")
|
||||
val = strings.ReplaceAll(val, "\n", "")
|
||||
val = strings.ReplaceAll(val, "\t", "")
|
||||
val = strings.TrimSpace(val)
|
||||
|
||||
createReqX86.CustomFields = val
|
||||
}
|
||||
|
||||
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
|
||||
createReqX86.NumaAffinity = numaAffinity.(string)
|
||||
}
|
||||
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
|
||||
createReqX86.HPBacked = d.Get("hp_backed").(bool)
|
||||
|
||||
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
|
||||
apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.FormatUint(apiResp, 10))
|
||||
computeId = apiResp
|
||||
if image, ok := d.GetOk("image_id"); ok {
|
||||
createReqX86.ImageID = uint64(image.(int))
|
||||
}
|
||||
if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
|
||||
createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
|
||||
}
|
||||
|
||||
if custom_fields, ok := d.GetOk("custom_fields"); ok {
|
||||
val := custom_fields.(string)
|
||||
val = strings.ReplaceAll(val, "\\", "")
|
||||
val = strings.ReplaceAll(val, "\n", "")
|
||||
val = strings.ReplaceAll(val, "\t", "")
|
||||
val = strings.TrimSpace(val)
|
||||
|
||||
createReqX86.CustomFields = val
|
||||
}
|
||||
|
||||
if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
|
||||
createReqX86.NumaAffinity = numaAffinity.(string)
|
||||
}
|
||||
createReqX86.CPUPin = d.Get("cpu_pin").(bool)
|
||||
createReqX86.HPBacked = d.Get("hp_backed").(bool)
|
||||
createReqX86.Chipset = d.Get("chipset").(string)
|
||||
|
||||
log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
|
||||
apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
|
||||
if err != nil {
|
||||
return diag.FromErr(err)
|
||||
}
|
||||
|
||||
d.SetId(strconv.FormatUint(apiResp, 10))
|
||||
computeId = apiResp
|
||||
|
||||
warnings := dc.Warnings{}
|
||||
|
||||
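The custom_fields value is sanitized before it is attached to the create request: literal backslashes, newlines and tabs are stripped and the remainder is trimmed. A minimal standalone sketch of that normalization, using a hypothetical input value (the helper name is an assumption of this sketch, not part of the provider):

package main

import (
	"fmt"
	"strings"
)

// normalizeCustomFields mirrors the sanitation chain used in
// resourceComputeCreate: drop backslashes, newlines and tabs, then trim.
func normalizeCustomFields(val string) string {
	val = strings.ReplaceAll(val, "\\", "")
	val = strings.ReplaceAll(val, "\n", "")
	val = strings.ReplaceAll(val, "\t", "")
	return strings.TrimSpace(val)
}

func main() {
	// Hypothetical value as it might arrive from a Terraform configuration.
	raw := "{\n\t\"device\": \"vm\"\n}\n"
	fmt.Println(normalizeCustomFields(raw)) // {"device": "vm"}
}
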
@@ -730,12 +648,13 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return diag.Errorf("resourceComputeUpdate: can't update compute because extnet ID %d is not allowed or does not exist", extNetId)
}
case "VFNIC":
if d.Get("driver").(string) == "KVM_PPC" {
return diag.Errorf("resourceComputeUpdate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
}
if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeUpdate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
}
case "DPDK":
if dpdkId, ok := existDPDKNetId(ctx, m, networkData["net_id"].(int)); !ok {
return diag.Errorf("resourceComputeCreate: can't create compute because DPDK ID %d is not allowed or does not exist", dpdkId)
}
default:
continue
}
@@ -960,6 +879,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
req.HPBacked = d.Get("hp_backed").(bool)
}

if d.HasChange("chipset") {
req.Chipset = d.Get("chipset").(string)
}

// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
// If STARTED, we need to stop it before update
var isStopRequired bool
@@ -1449,10 +1372,19 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
Label: snapshotItem["label"].(string),
}

_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
asyncMode, ok := d.GetOk("snapshot_delete_async")
if ok && asyncMode.(bool) {
_, err := c.CloudAPI().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}

}
}

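The new snapshot_delete_async flag only takes effect when it is both set and true; since GetOk treats a false bool as unset, omitting the flag keeps the previous synchronous SnapshotDelete behaviour. A small hedged sketch of that selection pattern, decoupled from the SDK types (both callbacks are stand-ins, not provider API):

package main

import "fmt"

// deleteSnapshot reproduces the branch used above: the asynchronous variant
// is called only when the flag is present and true; otherwise the call falls
// back to the synchronous variant. Both callbacks are placeholders.
func deleteSnapshot(asyncSet, asyncVal bool, deleteAsync, deleteSync func() error) error {
	if asyncSet && asyncVal {
		return deleteAsync()
	}
	return deleteSync()
}

func main() {
	async := func() error { fmt.Println("SnapshotDeleteAsync"); return nil }
	sync := func() error { fmt.Println("SnapshotDelete"); return nil }

	_ = deleteSnapshot(true, true, async, sync)   // prints SnapshotDeleteAsync
	_ = deleteSnapshot(false, false, async, sync) // prints SnapshotDelete
}
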
@@ -1924,7 +1856,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Required: true,
// ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86"}, false), // observe case while validating
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
@@ -1948,6 +1880,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
//ForceNew: true, //REDEPLOY
Description: "ID of the OS image to base this compute instance on.",
},
"chipset": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Type of the emulated system.",
},
"without_boot_disk": {
Type: schema.TypeBool,
Optional: true,
@@ -2124,6 +2062,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Schema: snapshotSubresourceSchemaMake(),
},
},
"snapshot_delete_async": {
Type: schema.TypeBool,
Optional: true,
},
"rollback": {
Type: schema.TypeSet,
MaxItems: 1,

@@ -177,13 +177,16 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData

oldSet, newSet := d.GetChange("network")

oldList := oldSet.(*schema.Set).List()
newList := newSet.(*schema.Set).List()

detachMap, changeIpMap, attachMap := differenceNetwork(oldList, newList)

apiErrCount := 0
var lastSavedError error

detachSet := oldSet.(*schema.Set).Difference(newSet.(*schema.Set))
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detachSet.Len(), d.Id())
for _, runner := range detachSet.List() {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", len(detachMap), d.Id())
for _, netData := range detachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetDetachRequest{
ComputeID: computeId,
@@ -200,22 +203,41 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}

log.Debugf("utilityComputeNetworksConfigure: changeIp set has %d items for Compute ID %s", len(changeIpMap), d.Id())
for _, netData := range changeIpMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.ChangeIPRequest{
ComputeID: computeId,
NetType: netData["net_type"].(string),
NetID: uint64(netData["net_id"].(int)),
IPAddr: netData["ip_address"].(string),
}

_, err := c.CloudAPI().Compute().ChangeIP(ctx, req)
if err != nil {
log.Errorf("utilityComputeNetworksConfigure: failed to change net ID %d of type %s from Compute ID %s: %s",
netData["net_id"].(int), netData["net_type"].(string), d.Id(), err)
apiErrCount++
lastSavedError = err
}
}

needStart := false

if d.Get("network").(*schema.Set).Len() == 1 || oldSet.(*schema.Set).Len() < 1 {
if oldSet.(*schema.Set).Len() == len(detachMap) || oldSet.(*schema.Set).Len() == 0 {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if err := utilityComputeStop(ctx, computeId, m); err != nil {
apiErrCount++
lastSavedError = err
}
needStart = true
if start := d.Get("started"); start.(bool) {
needStart = true
}
}

attachSet := newSet.(*schema.Set).Difference(oldSet.(*schema.Set))
attachList := attachSet.List()
sort.Slice(attachList, func(i, j int) bool {
weightI := attachList[i].(map[string]interface{})["weight"].(int)
weightJ := attachList[j].(map[string]interface{})["weight"].(int)
sort.Slice(attachMap, func(i, j int) bool {
weightI := attachMap[i]["weight"].(int)
weightJ := attachMap[j]["weight"].(int)
if weightI == 0 {
return false
}
@@ -224,9 +246,8 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
return weightI < weightJ
})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attachSet.Len(), d.Id())
for _, runner := range attachList {
netData := runner.(map[string]interface{})
log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", len(attachMap), d.Id())
for _, netData := range attachMap {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.NetAttachRequest{
ComputeID: computeId,
@@ -343,3 +364,50 @@ func utilityComputeUpdatePciDevices(ctx context.Context, d *schema.ResourceData,

return nil
}

func differenceNetwork(oldList, newList []interface{}) (detachMap, changeIpMap, attachMap []map[string]interface{}) {
attachMap = make([]map[string]interface{}, 0)
changeIpMap = make([]map[string]interface{}, 0)
detachMap = make([]map[string]interface{}, 0)
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
found := false
for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if (newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"] {
changeIpMap = append(changeIpMap, newMap)
found = true
break
} else if newMap["ip_address"] == oldMap["ip_address"] {
found = true
break
}
}
}
if found {
continue
}
detachMap = append(detachMap, oldMap)
}

for _, newNetwork := range newList {
newMap := newNetwork.(map[string]interface{})
found := false
for _, oldNetwork := range oldList {
oldMap := oldNetwork.(map[string]interface{})
if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] {
if newMap["ip_address"] == oldMap["ip_address"] || ((newMap["net_type"].(string) == "EXTNET" || newMap["net_type"].(string) == "VINS") && newMap["ip_address"] != oldMap["ip_address"]) {
found = true
break
}
}
}
if found {
continue
}
attachMap = append(attachMap, newMap)
}

return
}

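differenceNetwork splits the old and new network sets three ways: entries that vanish are detached, EXTNET/VINS entries whose only difference is the IP address are routed to an in-place ChangeIP, and genuinely new entries are attached. A test-style sketch with hypothetical entries illustrating that classification (it assumes it sits in the same package as the function above; the package name is a guess):

package kvmvm // assumption: same package as differenceNetwork above

import "testing"

func TestDifferenceNetworkClassification(t *testing.T) {
	oldList := []interface{}{
		map[string]interface{}{"net_type": "VINS", "net_id": 10, "weight": 1, "ip_address": "10.0.0.5"},
		map[string]interface{}{"net_type": "EXTNET", "net_id": 20, "weight": 2, "ip_address": "185.1.1.7"},
	}
	newList := []interface{}{
		// Same VINS entry with a new address -> expected in changeIpMap.
		map[string]interface{}{"net_type": "VINS", "net_id": 10, "weight": 1, "ip_address": "10.0.0.9"},
		// Entry that did not exist before -> expected in attachMap.
		map[string]interface{}{"net_type": "VFNIC", "net_id": 30, "weight": 3, "ip_address": ""},
	}
	// The EXTNET entry is absent from newList -> expected in detachMap.

	detach, changeIP, attach := differenceNetwork(oldList, newList)
	if len(detach) != 1 || len(changeIP) != 1 || len(attach) != 1 {
		t.Fatalf("unexpected split: detach=%d changeIP=%d attach=%d", len(detach), len(changeIP), len(attach))
	}
}
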
@@ -280,10 +280,7 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},

"account_id": {
Type: schema.TypeInt,
Computed: true,

@@ -59,10 +59,6 @@ func dataSourceRgUsageSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},

"cpu": {
Type: schema.TypeInt,

@@ -89,7 +89,6 @@ func flattenResgroup(d *schema.ResourceData, details rg.RecordResourceGroup) err

d.Set("account_id", details.AccountID)
d.Set("gid", details.GID)
d.Set("def_net_type", details.DefNetType)
d.Set("name", details.Name)
d.Set("quota", flattenQuota(details.ResourceLimits))
d.Set("account_name", details.AccountName)

@@ -45,7 +45,6 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
@@ -139,8 +138,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
}
if defNetType, ok := d.GetOk("def_net_type"); ok {
req.DefNet = defNetType.(string) // NOTE: in API default network type is set by "def_net" parameter
} else {
d.Set("def_net_type", "PRIVATE")
}

if owner, ok := d.GetOk("owner"); ok {
@@ -155,10 +152,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
req.Description = description.(string)
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

if extNetId, ok := d.GetOk("ext_net_id"); ok {
req.ExtNetID = uint64(extNetId.(int))
}
@@ -195,10 +188,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().AccessGrant(ctx, req)
if err != nil {
w.Add(err)
@@ -222,16 +211,12 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
if reason, ok := defNetItem["reason"]; ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().SetDefNet(ctx, req)
if err != nil {
w.Add(err)
}

d.Set("def_net_type", netType)
}

}
@@ -242,10 +227,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
RGID: apiResp,
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().Enable(ctx, req)
if err != nil {
w.Add(err)
@@ -255,10 +236,6 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
RGID: apiResp,
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().Disable(ctx, req)
if err != nil {
w.Add(err)
@@ -287,7 +264,6 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
case status.Modeled:
return diag.Errorf("The resource group is in status: %s, please, contact support for more information", rgData.Status)
case status.Created:
case status.Enabled:
case status.Deleted:
// restoreReq := rg.RestoreRequest{RGID: rgData.ID}
// enableReq := rg.EnableRequest{RGID: rgData.ID}
@@ -374,10 +350,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if restore, ok := d.GetOk("restore"); ok && restore.(bool) {
restoreReq := rg.RestoreRequest{RGID: rgData.ID}

if reason, ok := d.GetOk("reason"); ok {
restoreReq.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
@@ -386,9 +358,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
if enable, ok := d.GetOk("enable"); ok && enable.(bool) {
enableReq := rg.EnableRequest{RGID: rgData.ID}

if reason, ok := d.GetOk("reason"); ok {
enableReq.Reason = reason.(string)
}
_, err = c.CloudAPI().RG().Enable(ctx, enableReq)
if err != nil {
return diag.FromErr(err)
@@ -423,26 +392,19 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter

The following code fragment checks if any of these have been changed and generates error.
*/
if d.HasChange("def_net") {
_, newDefNet := d.GetChange("def_net")
if newDefNet.(*schema.Set).Len() == 0 {
return diag.Errorf("resourceResgroupUpdate: block def_net must not be empty")
}
}

for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
attr_new, attr_old := d.GetChange("def_net_type")
if attr_new.(string) != attr_old.(string) {
attrNew, attrOld := d.GetChange(attr)
if attrNew.(string) != attrOld.(string) {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr))
}
}

attrNew, attrOld := d.GetChange("ext_net_id")
if attrNew.(int) != attrOld.(int) {
if d.HasChange("ext_net_id") {
return diag.FromErr(fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id()))
}

if d.HasChanges("name", "quota", "description", "register_computes") {
if d.HasChanges("name", "quota", "description", "register_computes", "uniq_pools") {
if err := utilityUpdateRG(ctx, d, m, rgData.ID); err != nil {
return diag.FromErr(err)
}
@@ -477,9 +439,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
RGID: rgData.ID,
User: user,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().AccessRevoke(ctx, req)
if err != nil {
@@ -499,10 +458,6 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
Right: right,
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().AccessGrant(ctx, req)
if err != nil {
return diag.FromErr(err)
@@ -511,30 +466,31 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter
}

if d.HasChange("def_net") {
if ok := d.HasChange("def_net"); ok {
oldDefNet, newDefNet := d.GetChange("def_net")
if newDefNet.(*schema.Set).Len() > 0 {
changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List()
for _, changedDefNetInterface := range changedDefNet {
defNetItem := changedDefNetInterface.(map[string]interface{})
netType := defNetItem["net_type"].(string)
oldDefNet, newDefNet := d.GetChange("def_net")
if oldDefNet.(*schema.Set).Len() > 0 {
_, err := c.CloudAPI().RG().RemoveDefNet(ctx, rg.RemoveDefNetRequest{RGID: rgData.ID})
if err != nil {
return diag.FromErr(err)
}
}
if newDefNet.(*schema.Set).Len() > 0 {
changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List()
for _, changedDefNetInterface := range changedDefNet {
defNetItem := changedDefNetInterface.(map[string]interface{})
netType := defNetItem["net_type"].(string)

req := rg.SetDefNetRequest{
RGID: rgData.ID,
NetType: netType,
}
req := rg.SetDefNetRequest{
RGID: rgData.ID,
NetType: netType,
}

if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}
if reason, ok := defNetItem["reason"]; ok {
req.Reason = reason.(string)
}
if netID, ok := defNetItem["net_id"]; ok {
req.NetID = uint64(netID.(int))
}

_, err := c.CloudAPI().RG().SetDefNet(ctx, req)
if err != nil {
return diag.FromErr(err)
}
_, err := c.CloudAPI().RG().SetDefNet(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
@@ -559,9 +515,6 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

_, err := c.CloudAPI().RG().Delete(ctx, req)
if err != nil {
@@ -598,7 +551,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
@@ -700,11 +652,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -727,11 +674,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Default: 0,
Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Reason for action",
},
},
},
},
@@ -754,11 +696,6 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Set to True if you want force delete non-empty RG",
},
"register_computes": {
Type: schema.TypeBool,
Optional: true,
@@ -876,6 +813,7 @@ func ResourceRgSchemaMake() map[string]*schema.Schema {
},
"uniq_pools": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
@@ -913,25 +851,18 @@ func ResourceResgroup() *schema.Resource {
Default: &constants.Timeout300s,
},

CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff, i interface{}) error {
if diff.HasChange("def_net") {
diff.SetNewComputed("def_net_id")
}
if diff.HasChanges() {
diff.SetNewComputed("updated_by")
diff.SetNewComputed("updated_time")
}
return nil
},

Schema: ResourceRgSchemaMake(),
CustomizeDiff: customdiff.All(
customdiff.IfValueChange("def_net",
func(ctx context.Context, oldValue, newValue, meta interface{}) bool {
return true
},
func(ctx context.Context, d *schema.ResourceDiff, m interface{}) error {
oldValue, newValue := d.GetChange("def_net")

old := len(oldValue.(*schema.Set).List())
new_ := len(newValue.(*schema.Set).List())

if old == 1 && new_ == 0 {
return fmt.Errorf("CustomizeDiff: block def_net must not be empty")
}
return nil
},
),
),
StateUpgraders: []schema.StateUpgrader{
{
Type: resourceRGResourceV1().CoreConfigSchema().ImpliedType(),

@@ -52,9 +52,6 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
} else {
req.RGID = uint64(d.Get("rg_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

rgData, err := c.CloudAPI().RG().Get(ctx, req)
if err != nil {
@@ -137,8 +134,14 @@ func utilityUpdateRG(ctx context.Context, d *schema.ResourceData, m interface{},
req.RegisterComputes = d.Get("register_computes").(bool)
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
if d.HasChange("uniq_pools") {
uniqPools := d.Get("uniq_pools").([]interface{})
if len(uniqPools) == 0 {
req.ClearUniqPools = true
}
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}

_, err := c.CloudAPI().RG().Update(ctx, req)

@@ -46,10 +46,6 @@ func utilityDataRgUsageCheckPresence(ctx context.Context, d *schema.ResourceData
RGID: uint64(d.Get("rg_id").(int)),
}

if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}

usage, err := c.CloudAPI().RG().Usage(ctx, req)
if err != nil {
return nil, err

@@ -86,9 +86,17 @@ func resourceSnapshotDelete(ctx context.Context, d *schema.ResourceData, m inter
Label: d.Get("label").(string),
}

_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
asyncMode, ok := d.GetOk("delete_async_mode")
if ok && asyncMode.(bool) {
_, err := c.CloudAPI().Compute().SnapshotDeleteAsync(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
_, err := c.CloudAPI().Compute().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}

d.SetId("")
@@ -160,6 +168,11 @@ func resourceSnapshotSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "timestamp",
},
"delete_async_mode": {
Type: schema.TypeBool,
Computed: true,
Description: "async mode",
},
}
}


@@ -154,6 +154,47 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"mtu": {
Type: schema.TypeInt,
Computed: true,
Description: "mtu",
},
"libvirt_settings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"txmode": {
Type: schema.TypeString,
Computed: true,
},
"ioeventfd": {
Type: schema.TypeString,
Computed: true,
},
"event_idx": {
Type: schema.TypeString,
Computed: true,
},
"queues": {
Type: schema.TypeInt,
Computed: true,
},
"rx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
"tx_queue_size": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"name": {
Type: schema.TypeString,
Computed: true,
@@ -170,7 +211,7 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"node_id" : {
"node_id": {
Type: schema.TypeInt,
Computed: true,
},
@@ -178,6 +219,10 @@ func vnfInterfaceSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"bus_number": {
Type: schema.TypeInt,
Computed: true,
},
"qos": {
Type: schema.TypeList,
Computed: true,

@@ -83,6 +83,11 @@ func dataSourceVinsListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Filter by external IP address",
},
"vnf_dev_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by VNF Device id",
},
"include_deleted": {
Type: schema.TypeBool,
Optional: true,

@@ -87,25 +87,28 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
res := make([]map[string]interface{}, 0, len(interfaces))
for _, vnfInterface := range interfaces {
temp := map[string]interface{}{
"conn_id": vnfInterface.ConnID,
"conn_type": vnfInterface.ConnType,
"def_gw": vnfInterface.DefGW,
"enabled": vnfInterface.Enabled,
"flipgroup_id": vnfInterface.FLIPGroupID,
"guid": vnfInterface.GUID,
"ip_address": vnfInterface.IPAddress,
"listen_ssh": vnfInterface.ListenSSH,
"mac": vnfInterface.MAC,
"name": vnfInterface.Name,
"net_id": vnfInterface.NetID,
"net_mask": vnfInterface.NetMask,
"net_type": vnfInterface.NetType,
"node_id": vnfInterface.NodeID,
"pci_slot": vnfInterface.PCISlot,
"qos": flattenQOS(vnfInterface.QOS),
"target": vnfInterface.Target,
"type": vnfInterface.Type,
"vnfs": vnfInterface.VNFs,
"conn_id": vnfInterface.ConnID,
"conn_type": vnfInterface.ConnType,
"def_gw": vnfInterface.DefGW,
"enabled": vnfInterface.Enabled,
"flipgroup_id": vnfInterface.FLIPGroupID,
"guid": vnfInterface.GUID,
"ip_address": vnfInterface.IPAddress,
"listen_ssh": vnfInterface.ListenSSH,
"mac": vnfInterface.MAC,
"mtu": vnfInterface.MTU,
"name": vnfInterface.Name,
"net_id": vnfInterface.NetID,
"net_mask": vnfInterface.NetMask,
"net_type": vnfInterface.NetType,
"node_id": vnfInterface.NodeID,
"pci_slot": vnfInterface.PCISlot,
"bus_number": vnfInterface.BusNumber,
"qos": flattenQOS(vnfInterface.QOS),
"target": vnfInterface.Target,
"type": vnfInterface.Type,
"vnfs": vnfInterface.VNFs,
"libvirt_settings": flattenLibvirtSettings(vnfInterface.LibvirtSettings),
}
res = append(res, temp)
}
@@ -113,6 +116,21 @@ func flattenInterfaces(interfaces []vins.ItemVNFInterface) []map[string]interfac
return res
}

func flattenLibvirtSettings(libvirtSettings vins.LibvirtSettings) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"guid": libvirtSettings.GUID,
"txmode": libvirtSettings.TXMode,
"ioeventfd": libvirtSettings.IOEventFD,
"event_idx": libvirtSettings.EventIDx,
"queues": libvirtSettings.Queues,
"rx_queue_size": libvirtSettings.RXQueueSize,
"tx_queue_size": libvirtSettings.TXQueueSize,
}
res = append(res, temp)
return res
}

func flattenVNFDev(vnfDev vins.RecordVNFDev) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{

@@ -66,6 +66,10 @@ func utilityVinsListCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ExtIP = ext_ip.(string)
}

if VNFDevId, ok := d.GetOk("vnfdev_id"); ok {
req.VNFDevId = uint64(VNFDevId.(int))
}

if includeDeleted, ok := d.GetOk("include_deleted"); ok {
req.IncludeDeleted = includeDeleted.(bool)
}