commit 8ad6811e88 (parent 6876b25f0e), 2024-08-26 18:22:06 +03:00
597 changed files with 52808 additions and 2129 deletions

View File

@@ -186,6 +186,10 @@ func ExistVins(ctx context.Context, vinsId uint64, c *controller.ControllerCfg)
func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
if len(vinsIds) == 0 {
return errs
}
req := cb_vins.ListRequest{
IncludeDeleted: false,
}
@@ -217,6 +221,10 @@ func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.Controller
func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
if len(extNetIds) == 0 {
return errs
}
req := cb_extnet.ListRequest{}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
@@ -246,6 +254,10 @@ func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.Control
func ExistVFPools(ctx context.Context, vfpoolIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
if len(vfpoolIds) == 0 {
return errs
}
req := cb_vfpool.ListRequest{}
vfpoolList, err := c.CloudBroker().VFPool().List(ctx, req)
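The three hunks above add the same early-return guard: when the caller passes no IDs there is nothing to verify, so the empty error slice is returned before any CloudBroker List request is issued. A minimal sketch of the general pattern, with hypothetical names (the real functions build the cb_vins / cb_extnet / cb_vfpool list requests shown above):

package existence

import (
    "context"
    "fmt"
)

// existIDs mirrors the guard used in ExistVinses, ExistExtNets and ExistVFPools:
// an empty id slice short-circuits before any remote lookup is attempted.
func existIDs(ctx context.Context, ids []uint64, list func(context.Context) (map[uint64]struct{}, error)) []error {
    var errs []error
    if len(ids) == 0 {
        return errs // nothing requested, no API call
    }
    existing, err := list(ctx)
    if err != nil {
        return append(errs, err)
    }
    for _, id := range ids {
        if _, ok := existing[id]; !ok {
            errs = append(errs, fmt.Errorf("id %d not found", id))
        }
    }
    return errs
}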

View File

@@ -167,16 +167,16 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
}
if lbSysctlParams, ok := d.GetOk("lb_sysctl_params"); ok {
syscrlSliceMaps := lbSysctlParams.([]map[string]string)
syscrlSliceMaps := lbSysctlParams.([]interface{})
res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
for _, syscrlMap := range syscrlSliceMaps {
tempMap := make(map[string]interface{})
for k, v := range syscrlMap {
if intVal, err := strconv.Atoi(v); err == nil {
for k, v := range syscrlMap.(map[string]interface{}) {
if intVal, err := strconv.Atoi(v.(string)); err == nil {
tempMap[k] = intVal
continue
}
tempMap[k] = v
tempMap[k] = v.(string)
}
res = append(res, tempMap)
}
@@ -428,7 +428,7 @@ func resourceK8sCPDelete(ctx context.Context, d *schema.ResourceData, m interfac
c := m.(*controller.ControllerCfg)
req := k8s.DeleteRequest{
K8SID: k8sData.ID,
K8SID: k8sData.ID,
}
if val, ok := d.GetOk("permanently"); ok {
@@ -586,25 +586,28 @@ func handleStart(ctx context.Context, c *controller.ControllerCfg, start bool, k
func handleUpdateLbSysctlParams(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, k8sData *k8s.RecordK8S) error {
lbSysctlParams := d.Get("lb_sysctl_params").([]map[string]string)
lbSysctlParams := d.Get("lb_sysctl_params").([]interface{})
res := make([]map[string]interface{}, 0, len(lbSysctlParams))
for _, syscrlMap := range lbSysctlParams {
tempMap := make(map[string]interface{})
for k, v := range syscrlMap {
if intVal, err := strconv.Atoi(v); err == nil {
for k, v := range syscrlMap.(map[string]interface{}) {
if intVal, err := strconv.Atoi(v.(string)); err == nil {
tempMap[k] = intVal
continue
}
tempMap[k] = v
tempMap[k] = v.(string)
}
res = append(res, tempMap)
}
req := lb.UpdateSysctParamsRequest{
LBID: k8sData.LBID,
SysctlParams: res,
}
if len(res) > 0 {
req := lb.UpdateSysctParamsRequest{
LBID: k8sData.LBID,
SysctlParams: res,
}
_, err := c.CloudBroker().LB().UpdateSysctlParams(ctx, req)
return err
_, err := c.CloudBroker().LB().UpdateSysctlParams(ctx, req)
return err
}
return nil
}
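For context on the type assertions above: a value read from a schema.TypeList attribute comes back from ResourceData as []interface{}, and each map element is a map[string]interface{} whose values are themselves interface{}, so the previous direct assertion to []map[string]string would panic at runtime. The rewritten loops assert element by element and convert numeric strings with strconv.Atoi; the same conversion reappears in the LB resource further down. A minimal standalone sketch with hypothetical input values:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // what d.GetOk("lb_sysctl_params") would yield for a one-element list (values invented)
    raw := []interface{}{
        map[string]interface{}{"net.ipv4.tcp_keepalive_time": "300", "mode": "fast"},
    }
    res := make([]map[string]interface{}, 0, len(raw))
    for _, item := range raw {
        tempMap := make(map[string]interface{})
        for k, v := range item.(map[string]interface{}) {
            if intVal, err := strconv.Atoi(v.(string)); err == nil {
                tempMap[k] = intVal // numeric strings are sent as integers
                continue
            }
            tempMap[k] = v.(string) // everything else stays a string
        }
        res = append(res, tempMap)
    }
    fmt.Println(res) // [map[mode:fast net.ipv4.tcp_keepalive_time:300]]
}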

View File

@@ -193,12 +193,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"depresent": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "whether to depresent compute disks from node or not",
},
"desc": {
Type: schema.TypeString,
Computed: true,
@@ -2849,6 +2843,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Optional text description of this compute instance.",
},
"depresent": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "whether to depresent compute disks from node or not",
},
"started": {
Type: schema.TypeBool,
Optional: true,

View File

@@ -71,16 +71,16 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
req.HighlyAvailable = haMode.(bool)
}
if sysctlParams, ok := d.GetOk("sysctl_params"); ok {
syscrlSliceMaps := sysctlParams.([]map[string]string)
syscrlSliceMaps := sysctlParams.([]interface{})
res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
for _, syscrlMap := range syscrlSliceMaps {
tempMap := make(map[string]interface{})
for k, v := range syscrlMap {
if intVal, err := strconv.Atoi(v); err == nil {
for k, v := range syscrlMap.(map[string]interface{}) {
if intVal, err := strconv.Atoi(v.(string)); err == nil {
tempMap[k] = intVal
continue
}
tempMap[k] = v
tempMap[k] = v.(string)
}
res = append(res, tempMap)
}
@@ -349,26 +349,28 @@ func resourceLbEnable(ctx context.Context, lbId uint64, m interface{}) error {
func resourceLbChangeSysctlParams(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
syscrlSliceMaps := d.Get("sysctl_params").([]map[string]string)
syscrlSliceMaps := d.Get("sysctl_params").([]interface{})
res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
for _, syscrlMap := range syscrlSliceMaps {
tempMap := make(map[string]interface{})
for k, v := range syscrlMap {
if intVal, err := strconv.Atoi(v); err == nil {
for k, v := range syscrlMap.(map[string]interface{}) {
if intVal, err := strconv.Atoi(v.(string)); err == nil {
tempMap[k] = intVal
continue
}
tempMap[k] = v
tempMap[k] = v.(string)
}
res = append(res, tempMap)
}
req := lb.UpdateSysctParamsRequest{
LBID: lbId,
SysctlParams: res,
if len(res) > 0 {
req := lb.UpdateSysctParamsRequest{
LBID: lbId,
SysctlParams: res,
}
_, err := c.CloudBroker().LB().UpdateSysctlParams(ctx, req)
return err
}
_, err := c.CloudBroker().LB().UpdateSysctlParams(ctx, req)
return err
return nil
}
func resourceLbDisable(ctx context.Context, lbId uint64, m interface{}) error {

View File

@@ -150,32 +150,32 @@ func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m inte
BackendName: d.Get("name").(string),
}
if d.HasChange("algorithm") {
req.Algorithm = d.Get("algorithm").(string)
if algorithm, ok := d.GetOk("algorithm"); ok {
req.Algorithm = algorithm.(string)
}
if d.HasChange("inter") {
req.Inter = uint64(d.Get("inter").(int))
if inter, ok := d.GetOk("inter"); ok {
req.Inter = uint64(inter.(int))
}
if d.HasChange("downinter") {
req.DownInter = uint64(d.Get("downinter").(int))
if downinter, ok := d.GetOk("downinter"); ok {
req.DownInter = uint64(downinter.(int))
}
if d.HasChange("rise") {
req.Rise = uint64(d.Get("rise").(int))
if rise, ok := d.GetOk("rise"); ok {
req.Rise = uint64(rise.(int))
}
if d.HasChange("fall") {
req.Fall = uint64(d.Get("fall").(int))
if fall, ok := d.GetOk("fall"); ok {
req.Fall = uint64(fall.(int))
}
if d.HasChange("slowstart") {
req.SlowStart = uint64(d.Get("slowstart").(int))
if slowstart, ok := d.GetOk("slowstart"); ok {
req.SlowStart = uint64(slowstart.(int))
}
if d.HasChange("maxconn") {
req.MaxConn = uint64(d.Get("maxconn").(int))
if maxconn, ok := d.GetOk("maxconn"); ok {
req.MaxConn = uint64(maxconn.(int))
}
if d.HasChange("maxqueue") {
req.MaxQueue = uint64(d.Get("maxqueue").(int))
if maxqueue, ok := d.GetOk("maxqueue"); ok {
req.MaxQueue = uint64(maxqueue.(int))
}
if d.HasChange("weight") {
req.Weight = uint64(d.Get("weight").(int))
if weight, ok := d.GetOk("weight"); ok {
req.Weight = uint64(weight.(int))
}
_, err := c.CloudBroker().LB().BackendUpdate(ctx, req)
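The update handler above (and the backend server update in the next file) switches from d.HasChange to d.GetOk. The two helpers answer different questions in the plugin SDK: HasChange compares the configured value against prior state, while GetOk reports whether the value is set to a non-zero value in the configuration. The request therefore now carries every populated field on each update, not only the fields that changed. A brief sketch of the difference, using a hypothetical request type and the "inter" field:

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// hypothetical request type standing in for the SDK's backend update request
type updateReq struct {
    Inter uint64
}

func fillInter(d *schema.ResourceData, req *updateReq) {
    // HasChange: true only when the value differs from what is recorded in state
    if d.HasChange("inter") {
        req.Inter = uint64(d.Get("inter").(int))
    }
    // GetOk: true whenever the value is present and non-zero, changed or not
    if inter, ok := d.GetOk("inter"); ok {
        req.Inter = uint64(inter.(int))
    }
}

One consequence worth noting: with GetOk a field explicitly set to its zero value is never sent, whereas the old HasChange branch would have sent it when that zero value was a change.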

View File

@@ -161,32 +161,32 @@ func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData,
Port: uint64(d.Get("port").(int)),
}
if d.HasChange("check") {
req.Check = d.Get("check").(string)
if check, ok := d.GetOk("check"); ok {
req.Check = check.(string)
}
if d.HasChange("inter") {
req.Inter = uint64(d.Get("inter").(int))
if inter, ok := d.GetOk("inter"); ok {
req.Inter = uint64(inter.(int))
}
if d.HasChange("downinter") {
req.DownInter = uint64(d.Get("downinter").(int))
if downinter, ok := d.GetOk("downinter"); ok {
req.DownInter = uint64(downinter.(int))
}
if d.HasChange("rise") {
req.Rise = uint64(d.Get("rise").(int))
if rise, ok := d.GetOk("rise"); ok {
req.Rise = uint64(rise.(int))
}
if d.HasChange("fall") {
req.Fall = uint64(d.Get("fall").(int))
if fall, ok := d.GetOk("fall"); ok {
req.Fall = uint64(fall.(int))
}
if d.HasChange("slowstart") {
req.SlowStart = uint64(d.Get("slowstart").(int))
if slowstart, ok := d.GetOk("slowstart"); ok {
req.SlowStart = uint64(slowstart.(int))
}
if d.HasChange("maxconn") {
req.MaxConn = uint64(d.Get("maxconn").(int))
if maxconn, ok := d.GetOk("maxconn"); ok {
req.MaxConn = uint64(maxconn.(int))
}
if d.HasChange("maxqueue") {
req.MaxQueue = uint64(d.Get("maxqueue").(int))
if maxqueue, ok := d.GetOk("maxqueue"); ok {
req.MaxQueue = uint64(maxqueue.(int))
}
if d.HasChange("weight") {
req.Weight = uint64(d.Get("weight").(int))
if weight, ok := d.GetOk("weight"); ok {
req.Weight = uint64(weight.(int))
}
_, err := c.CloudBroker().LB().BackendServerUpdate(ctx, req)

View File

@@ -185,7 +185,20 @@ func flattenNicInfo(infos node.ListNicInfo) []map[string]interface{} {
"num_vfs": item.NumVFS,
"os_name": item.OSName,
"pci_slot": item.PCISlot,
"vf_list": flattenNodeItem(item.VFList),
"vf_list": flattenVFList(item.VFList),
}
res = append(res, temp)
}
return res
}
func flattenVFList(vfList []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(vfList))
for _, v := range vfList {
vConv := v.(map[string]interface{})
temp := map[string]interface{}{
"fn_id": vConv["fnId"],
"pci_slot": vConv["pciSlot"],
}
res = append(res, temp)
}

View File

@@ -164,8 +164,17 @@ func dataSourceNodeSchemaMake() map[string]*schema.Schema {
"vf_list": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"fn_id": {
Type: schema.TypeInt,
Computed: true,
},
"pci_slot": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
@@ -477,8 +486,17 @@ func dataSourceNodeListSchemaMake() map[string]*schema.Schema {
"vf_list": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"fn_id": {
Type: schema.TypeInt,
Computed: true,
},
"pci_slot": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
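The two schema hunks above change vf_list from a list of strings to a list of objects, matching the new flattenVFList helper in the previous file: each flattened element must expose exactly the keys declared in the Elem resource (fn_id, pci_slot). A small self-contained sketch of that pairing, with hypothetical input values and assuming the platform returns VF entries keyed fnId/pciSlot as in the flatten code:

package main

import "fmt"

// adapted from the new helper shown above
func flattenVFList(vfList []interface{}) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(vfList))
    for _, v := range vfList {
        vConv := v.(map[string]interface{})
        res = append(res, map[string]interface{}{
            "fn_id":    vConv["fnId"],
            "pci_slot": vConv["pciSlot"],
        })
    }
    return res
}

func main() {
    // hypothetical VF entry in the platform's camelCase form
    vfs := []interface{}{
        map[string]interface{}{"fnId": 3, "pciSlot": "0000:3b:02.1"},
    }
    fmt.Println(flattenVFList(vfs))
    // [map[fn_id:3 pci_slot:0000:3b:02.1]] -- keys line up with the vf_list Elem schema
}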

View File

@@ -34,6 +34,7 @@ package sep
import (
"context"
"encoding/json"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -69,7 +70,7 @@ func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
var consumedNIDs []uint64
for _, item := range d.Get("consumed_by").([]interface{}) {
for _, item := range d.Get("consumed_by").(*schema.Set).List() {
consumedNIDs = append(consumedNIDs, uint64(item.(int)))
}
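consumed_by is declared as schema.TypeSet in the schema later in this commit, so asserting its value to []interface{} would panic; a TypeSet value arrives as *schema.Set and its elements are obtained with List(). A short fragment for contrast (d is the usual *schema.ResourceData, "some_list" is an invented TypeList attribute):

// a TypeList value is already a []interface{}
for _, item := range d.Get("some_list").([]interface{}) {
    _ = item
}
// a TypeSet value is a *schema.Set; List() yields []interface{} in no guaranteed order
for _, item := range d.Get("consumed_by").(*schema.Set).List() {
    consumedNIDs = append(consumedNIDs, uint64(item.(int)))
}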
@@ -172,84 +173,9 @@ func resourceSepDelete(ctx context.Context, d *schema.ResourceData, m interface{
}
func resourceSepUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceSepUpdate: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int))
c := m.(*controller.ControllerCfg)
return diag.Errorf(
"SEP upgrade is not possible via terraform")
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
if d.HasChange("account_ids") {
err := resourceSepChangeAccess(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("access_to_pool") {
err := resourceSepChangeAccessToPool(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("decommission") {
err := resourceSepDecommission(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("upd_capacity_limit") {
err := resourceSepUpdateCapacityLimit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("config") {
err := resourceSepUpdateConfig(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("field_edit") {
err := resourceSepFieldEdit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enable") {
err := resourceSepChangeEnabled(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("consumed_by") {
err := resourceSepUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("provided_by") {
err := resourceSepUpdateProviders(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("pools") {
err := resourceSepChangePools(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return resourceSepRead(ctx, d, m)
}
func resourceSepChangeAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error {
@@ -371,10 +297,47 @@ func resourceSepChangePools(ctx context.Context, d *schema.ResourceData, m inter
newPoolsList := newPoolsInterface.(*schema.Set).Difference(oldPoolsInterface.(*schema.Set)).List()
for _, pool := range newPoolsList {
poolItem := pool.(map[string]interface{})
accessAccountIDs := []uint64{}
for _, v := range poolItem["access_account_ids"].([]interface{}) {
accessAccountIDs = append(accessAccountIDs, uint64(v.(int)))
}
accessResGroupIDs := []uint64{}
for _, v := range poolItem["access_res_group_ids"].([]interface{}) {
accessResGroupIDs = append(accessResGroupIDs, uint64(v.(int)))
}
types := []string{}
for _, v := range poolItem["types"].([]interface{}) {
types = append(types, v.(string))
}
uris := []UrisModel{}
list := poolItem["uris"].(*schema.Set).List()
for _, v := range list {
if m, ok := v.(map[string]interface{}); ok {
uris = append(uris, UrisModel{IP: m["ip"].(string),
Port: uint64(m["port"].(int)),
})
}
}
poolValue := PoolModel{
AccessAccountIDs: accessAccountIDs,
AccessResGroupIDs: accessResGroupIDs,
Name: poolItem["name"].(string),
Types: types,
Uris: uris,
UsageLimit: uint64(poolItem["usage_limit"].(int)),
}
marshalPool, _ := json.Marshal(poolValue)
log.Debugf(string(marshalPool))
addPoolReq := sep.AddPoolRequest{
SEPID: uint64(d.Get("sep_id").(int)),
Sync: true,
Pool: pool.(string),
Pool: string(marshalPool),
}
_, err := c.CloudBroker().SEP().AddPool(ctx, addPoolReq)
@@ -386,43 +349,6 @@ func resourceSepChangePools(ctx context.Context, d *schema.ResourceData, m inter
return nil
}
func resourceSepDecommission(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
decommission := d.Get("decommission").(bool)
if decommission {
req := sep.DecommissionRequest{
SEPID: uint64(d.Get("sep_id").(int)),
ClearPhisically: d.Get("clear_physically").(bool),
}
_, err := c.CloudBroker().SEP().Decommission(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceSepUpdateCapacityLimit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
updCapacityLimit := d.Get("upd_capacity_limit").(bool)
if updCapacityLimit {
req := sep.UpdateCapacityLimitRequest{
SEPID: uint64(d.Get("sep_id").(int)),
}
_, err := c.CloudBroker().SEP().UpdateCapacityLimit(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceSepUpdateConfig(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
@@ -581,3 +507,18 @@ func ResourceSep() *schema.Resource {
Schema: resourceSepSchemaMake(),
}
}
type PoolModel struct {
AccessAccountIDs []uint64 `json:"accessAccountIds"`
AccessResGroupIDs []uint64 `json:"accessResGroupIds"`
Name string `json:"name"`
Types []string `json:"types"`
Uris []UrisModel `json:"uris"`
UsageLimit uint64 `json:"usage_limit"`
}
type UrisModel struct {
IP string `json:"ip"`
Port uint64 `json:"port"`
}
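The pools change above stops passing the raw set element into AddPoolRequest.Pool and instead rebuilds each pool as a typed PoolModel, which is then marshaled into the JSON string the AddPool call expects. A sketch of the resulting payload for a hypothetical pool (all field values invented for illustration):

package main

import (
    "encoding/json"
    "fmt"
)

// same shapes as the PoolModel / UrisModel types added in this commit
type UrisModel struct {
    IP   string `json:"ip"`
    Port uint64 `json:"port"`
}

type PoolModel struct {
    AccessAccountIDs  []uint64    `json:"accessAccountIds"`
    AccessResGroupIDs []uint64    `json:"accessResGroupIds"`
    Name              string      `json:"name"`
    Types             []string    `json:"types"`
    Uris              []UrisModel `json:"uris"`
    UsageLimit        uint64      `json:"usage_limit"`
}

func main() {
    pool := PoolModel{
        AccessAccountIDs:  []uint64{101},
        AccessResGroupIDs: []uint64{},
        Name:              "pool-a",
        Types:             []string{"data"},
        Uris:              []UrisModel{{IP: "10.0.0.5", Port: 3260}},
        UsageLimit:        100,
    }
    b, _ := json.Marshal(pool)
    fmt.Println(string(b))
    // {"accessAccountIds":[101],"accessResGroupIds":[],"name":"pool-a","types":["data"],"uris":[{"ip":"10.0.0.5","port":3260}],"usage_limit":100}
    // this string is what ends up in sep.AddPoolRequest{ Pool: ... }
}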

View File

@@ -533,67 +533,78 @@ func resourceSepSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"clear_physically": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "clear disks and images physically",
},
"decommission": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "unlink everything that exists from SEP",
},
"enable": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "enable SEP after creation",
},
"field_edit": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"field_name": {
Type: schema.TypeString,
Required: true,
Description: "field name",
},
"field_value": {
Type: schema.TypeString,
Required: true,
Description: "field value",
},
"field_type": {
Type: schema.TypeString,
Required: true,
Description: "field type",
},
},
},
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "sep type des id",
},
"upd_capacity_limit": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Update SEP capacity limit",
},
"pools": {
Type: schema.TypeSet,
Optional: true,
Description: "add/delete pools to/from sep",
Elem: &schema.Schema{
Type: schema.TypeString,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"access_account_ids": {
Type: schema.TypeList,
Required: true,
Description: "access account ids",
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"access_res_group_ids": {
Type: schema.TypeList,
Required: true,
Description: "access res group ids",
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"types": {
Type: schema.TypeList,
Optional: true,
Description: "types",
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"uris": {
Type: schema.TypeSet,
Optional: true,
Description: "uris",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ip": {
Type: schema.TypeString,
Required: true,
Description: "ip",
},
"port": {
Type: schema.TypeInt,
Required: true,
Description: "port",
},
},
},
},
"usage_limit": {
Type: schema.TypeInt,
Required: true,
Description: "usage limit",
},
},
},
},
"ckey": {
@@ -610,9 +621,10 @@ func resourceSepSchemaMake() map[string]*schema.Schema {
},
},
"config": {
Type: schema.TypeString,
Required: true,
Description: "sep config string",
Type: schema.TypeString,
Required: true,
Description: "sep config string",
DiffSuppressFunc: resourceSepDiffSupperss,
},
"consumed_by": {
Type: schema.TypeSet,

View File

@@ -34,6 +34,8 @@ package sep
import (
"context"
"encoding/json"
"reflect"
"strconv"
log "github.com/sirupsen/logrus"
@@ -67,3 +69,15 @@ func utilitySepCheckPresence(ctx context.Context, d *schema.ResourceData, m inte
return sep, nil
}
func resourceSepDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
var v1, v2 interface{}
json.Unmarshal([]byte(newVal), &v1)
json.Unmarshal([]byte(oldVal), &v2)
if reflect.DeepEqual(v1, v2) {
log.Debugf("resourceSepDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true
}
log.Debugf("resourceSepDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false
}
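resourceSepDiffSupperss is wired to the config attribute through DiffSuppressFunc (see the schema change above): both the stored and the configured JSON strings are unmarshalled into generic values and compared with reflect.DeepEqual, so differences in key order or whitespace no longer produce a plan diff. Note also that the Unmarshal errors are ignored, so two unparsable strings both decode to nil and are likewise treated as equal. A small illustration with hypothetical config values:

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
)

func main() {
    // semantically identical, textually different
    oldVal := `{"edgeuser":"user","capacity":10}`
    newVal := `{
  "capacity": 10,
  "edgeuser": "user"
}`
    var v1, v2 interface{}
    _ = json.Unmarshal([]byte(newVal), &v1)
    _ = json.Unmarshal([]byte(oldVal), &v2)
    fmt.Println(reflect.DeepEqual(v1, v2)) // true: the diff on "config" would be suppressed
}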

View File

@@ -33,6 +33,7 @@ package user
import (
"context"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"

View File

@@ -139,7 +139,7 @@ func flattenUserList(users *user.ListUsers) []map[string]interface{} {
temp := map[string]interface{}{
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"api_access": item.APIAccess,
"apiaccess": item.APIAccess,
"active": item.Active,
"authkey": item.AuthKey,
"authkeys": flattenItemUser(item.AuthKeys),

View File

@@ -2018,7 +2018,7 @@ func resourceVinsSchemaMake() map[string]*schema.Schema {
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"DHCP", "VIP", "EXCLUDE"}, false),
ValidateFunc: validation.StringInSlice([]string{"DHCP", "VIP", "EXCLUDED"}, false),
},
"ip_addr": {
Type: schema.TypeString,